-rw-r--r--  dx/src/com/android/dx/dex/SizeOf.java | 7
-rw-r--r--  dx/src/com/android/dx/io/Code.java | 22
-rw-r--r--  dx/src/com/android/dx/io/DexBuffer.java | 83
-rw-r--r--  dx/src/com/android/dx/merge/DexMerger.java | 144
-rw-r--r--  libdex/Android.mk | 7
-rw-r--r--  libdex/OptInvocation.cpp | 32
-rw-r--r--  libdex/sha1.cpp | 4
-rw-r--r--  tests/083-jit-regressions/expected.txt | 2
-rw-r--r--  tests/083-jit-regressions/src/Main.java | 20
-rw-r--r--  tests/083-jit-regressions/src/ZeroTests.java | 72
-rwxr-xr-x  tests/etc/host-run-test-jar | 2
-rwxr-xr-x  tests/etc/push-and-run-test-jar | 4
-rwxr-xr-x  tests/run-test | 4
-rw-r--r--  vm/Init.cpp | 14
-rw-r--r--  vm/InlineNative.cpp | 80
-rw-r--r--  vm/InlineNative.h | 9
-rw-r--r--  vm/LinearAlloc.cpp | 6
-rw-r--r--  vm/Misc.cpp | 7
-rw-r--r--  vm/StdioConverter.cpp | 6
-rw-r--r--  vm/Sync.cpp | 6
-rw-r--r--  vm/Thread.cpp | 12
-rw-r--r--  vm/alloc/Alloc.h | 20
-rw-r--r--  vm/alloc/Heap.cpp | 29
-rw-r--r--  vm/alloc/Heap.h | 2
-rw-r--r--  vm/alloc/HeapSource.cpp | 80
-rw-r--r--  vm/analysis/CodeVerify.cpp | 7
-rw-r--r--  vm/analysis/DexPrepare.cpp | 4
-rw-r--r--  vm/analysis/VfyBasicBlock.cpp | 5
-rw-r--r--  vm/compiler/Compiler.cpp | 6
-rw-r--r--  vm/compiler/codegen/arm/CalloutHelper.h | 7
-rw-r--r--  vm/compiler/codegen/arm/Codegen.h | 2
-rw-r--r--  vm/compiler/codegen/arm/CodegenDriver.cpp | 54
-rw-r--r--  vm/compiler/codegen/arm/FP/ThumbVFP.cpp | 3
-rw-r--r--  vm/compiler/codegen/arm/GlobalOptimizations.cpp | 2
-rw-r--r--  vm/compiler/codegen/arm/armv6-vfp/ArchVariant.cpp | 112
-rw-r--r--  vm/compiler/codegen/arm/armv6-vfp/ArchVariant.h | 34
-rw-r--r--  vm/compiler/codegen/arm/armv6-vfp/CallingConvention.S | 34
-rw-r--r--  vm/compiler/codegen/arm/armv6-vfp/Codegen.cpp | 56
-rw-r--r--  vm/compiler/codegen/arm/armv6j/ArchVariant.cpp | 112
-rw-r--r--  vm/compiler/codegen/arm/armv6j/ArchVariant.h | 34
-rw-r--r--  vm/compiler/codegen/arm/armv6j/CallingConvention.S | 32
-rw-r--r--  vm/compiler/codegen/arm/armv6j/Codegen.cpp | 56
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S | 8
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S | 8
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S | 4
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S | 8
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S | 6
-rw-r--r--  vm/compiler/template/armv5te/TEMPLATE_RETURN.S | 4
-rw-r--r--  vm/compiler/template/armv5te/footer.S | 20
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_ADD_DOUBLE_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_ADD_FLOAT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_DOUBLE_VFP.S | 33
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_FLOAT_VFP.S | 32
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_DOUBLE_VFP.S | 32
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_FLOAT_VFP.S | 32
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_DIV_DOUBLE_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_DIV_FLOAT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_INT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_INT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_DOUBLE_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_FLOAT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_MEM_OP_DECODE.S | 19
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_MUL_DOUBLE_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_MUL_FLOAT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_RESTORE_STATE.S | 11
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_SAVE_STATE.S | 23
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_SQRT_DOUBLE_VFP.S | 23
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_SUB_DOUBLE_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TEMPLATE_SUB_FLOAT_VFP.S | 2
-rw-r--r--  vm/compiler/template/armv6-vfp/TemplateOpList.h | 65
-rw-r--r--  vm/compiler/template/armv6-vfp/fbinop.S | 14
-rw-r--r--  vm/compiler/template/armv6-vfp/fbinopWide.S | 14
-rw-r--r--  vm/compiler/template/armv6-vfp/funop.S | 15
-rw-r--r--  vm/compiler/template/armv6-vfp/funopNarrower.S | 15
-rw-r--r--  vm/compiler/template/armv6-vfp/funopWider.S | 15
-rw-r--r--  vm/compiler/template/armv6-vfp/platform.S | 5
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_CMPG_DOUBLE.S | 1
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_CMPG_FLOAT.S | 1
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_CMPL_DOUBLE.S | 38
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_CMPL_FLOAT.S | 56
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_CMP_LONG.S | 33
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INTERPRET.S | 30
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN.S | 49
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S | 3
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE.S | 83
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S | 3
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT.S | 60
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S | 3
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S | 58
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S | 3
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_MEM_OP_DECODE.S | 17
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER.S | 21
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER_DEBUG.S | 28
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_MUL_LONG.S | 28
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_PERIODIC_PROFILING.S | 26
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_RESTORE_STATE.S | 8
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_RETURN.S | 57
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_RETURN_PROF.S | 3
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_SAVE_STATE.S | 21
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_SHL_LONG.S | 15
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_SHR_LONG.S | 15
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_STRING_COMPARETO.S | 133
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_STRING_INDEXOF.S | 112
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_THROW_EXCEPTION_COMMON.S | 6
-rw-r--r--  vm/compiler/template/armv6j/TEMPLATE_USHR_LONG.S | 15
-rw-r--r--  vm/compiler/template/armv6j/TemplateOpList.h | 50
-rw-r--r--  vm/compiler/template/armv6j/footer.S | 129
-rw-r--r--  vm/compiler/template/armv6j/header.S | 95
-rw-r--r--  vm/compiler/template/armv6j/platform.S | 5
-rw-r--r--  vm/compiler/template/config-armv6-vfp | 68
-rw-r--r--  vm/compiler/template/config-armv6j | 68
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S | 64
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv5te.S | 96
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv6-vfp.S | 1981
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv6j.S | 1712
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S | 64
-rw-r--r--  vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S | 64
-rwxr-xr-x  vm/compiler/template/rebuild.sh | 2
-rw-r--r--  vm/dalvik | 2
-rw-r--r--  vm/hprof/HprofClass.cpp | 4
-rw-r--r--  vm/interp/Interp.cpp | 18
-rw-r--r--  vm/jdwp/JdwpAdb.cpp | 4
-rw-r--r--  vm/jdwp/JdwpHandler.cpp | 14
-rw-r--r--  vm/jdwp/JdwpSocket.cpp | 4
-rw-r--r--  vm/mterp/armv5te/footer.S | 8
-rw-r--r--  vm/mterp/common/jit-config.h | 2
-rw-r--r--  vm/mterp/config-armv6-vfp | 108
-rw-r--r--  vm/mterp/config-armv6j | 78
-rw-r--r--  vm/mterp/out/InterpAsm-armv5te-vfp.S | 8
-rw-r--r--  vm/mterp/out/InterpAsm-armv5te.S | 8
-rw-r--r--  vm/mterp/out/InterpAsm-armv6-vfp.S | 16866
-rw-r--r--  vm/mterp/out/InterpAsm-armv6j.S | 17324
-rw-r--r--  vm/mterp/out/InterpAsm-armv7-a-neon.S | 8
-rw-r--r--  vm/mterp/out/InterpAsm-armv7-a.S | 8
-rw-r--r--  vm/mterp/out/InterpC-armv6-vfp.cpp | 1249
-rw-r--r--  vm/mterp/out/InterpC-armv6j.cpp | 1249
-rwxr-xr-x  vm/mterp/rebuild.sh | 2
-rw-r--r--  vm/native/dalvik_system_VMRuntime.cpp | 35
-rw-r--r--  vm/native/dalvik_system_Zygote.cpp | 63
-rw-r--r--  vm/oo/Resolve.cpp | 15
-rw-r--r--  vm/reflect/Annotation.cpp | 8
143 files changed, 43714 insertions, 363 deletions
diff --git a/dx/src/com/android/dx/dex/SizeOf.java b/dx/src/com/android/dx/dex/SizeOf.java
index 476f7bbcd..6ded78282 100644
--- a/dx/src/com/android/dx/dex/SizeOf.java
+++ b/dx/src/com/android/dx/dex/SizeOf.java
@@ -100,4 +100,11 @@ public final class SizeOf {
* offset uint
*/
public static final int MAP_ITEM = USHORT + USHORT + UINT + UINT;
+
+ /**
+ * start_addr uint
+ * insn_count ushort
+ * handler_off ushort
+ */
+ public static final int TRY_ITEM = UINT + USHORT + USHORT;
}
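The new TRY_ITEM constant mirrors the fixed try_item layout (start_addr uint + insn_count ushort + handler_off ushort = 8 bytes), so a writer can reserve space for a method's tries before the catch handlers they point at have been written. A minimal sizing sketch (hypothetical caller, assuming UINT == 4 and USHORT == 2 as defined in SizeOf):

    import com.android.dx.dex.SizeOf;

    class TryItemSizing {
        // Bytes to reserve for a method's try table; the entries are filled in
        // after the encoded_catch_handler_list has been emitted.
        static int bytesForTries(int triesSize) {
            return triesSize * SizeOf.TRY_ITEM;   // TRY_ITEM == 4 + 2 + 2 == 8
        }
    }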
diff --git a/dx/src/com/android/dx/io/Code.java b/dx/src/com/android/dx/io/Code.java
index ba95d1b92..82da86267 100644
--- a/dx/src/com/android/dx/io/Code.java
+++ b/dx/src/com/android/dx/io/Code.java
@@ -67,12 +67,12 @@ public final class Code {
public static class Try {
final int startAddress;
final int instructionCount;
- final int handlerOffset;
+ final int catchHandlerIndex;
- Try(int startAddress, int instructionCount, int handlerOffset) {
+ Try(int startAddress, int instructionCount, int catchHandlerIndex) {
this.startAddress = startAddress;
this.instructionCount = instructionCount;
- this.handlerOffset = handlerOffset;
+ this.catchHandlerIndex = catchHandlerIndex;
}
public int getStartAddress() {
@@ -83,8 +83,12 @@ public final class Code {
return instructionCount;
}
- public int getHandlerOffset() {
- return handlerOffset;
+ /**
+ * Returns this try's catch handler <strong>index</strong>. Note that
+ * this is distinct from its catch handler <strong>offset</strong>.
+ */
+ public int getCatchHandlerIndex() {
+ return catchHandlerIndex;
}
}
@@ -92,11 +96,13 @@ public final class Code {
final int[] typeIndexes;
final int[] addresses;
final int catchAllAddress;
+ final int offset;
- public CatchHandler(int[] typeIndexes, int[] addresses, int catchAllAddress) {
+ public CatchHandler(int[] typeIndexes, int[] addresses, int catchAllAddress, int offset) {
this.typeIndexes = typeIndexes;
this.addresses = addresses;
this.catchAllAddress = catchAllAddress;
+ this.offset = offset;
}
public int[] getTypeIndexes() {
@@ -110,5 +116,9 @@ public final class Code {
public int getCatchAllAddress() {
return catchAllAddress;
}
+
+ public int getOffset() {
+ return offset;
+ }
}
}
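With this change a Try records an index into the catch handler array decoded alongside it, rather than a raw byte offset; the on-disk offset survives on the CatchHandler itself. A minimal usage sketch (hypothetical code, assuming Code exposes getTries()/getCatchHandlers() as used by DexMerger below):

    import com.android.dx.io.Code;

    class TryDumper {
        static void dumpTries(Code code) {
            for (Code.Try tryItem : code.getTries()) {
                // Resolve the handler through the index recorded at read time.
                Code.CatchHandler handler =
                        code.getCatchHandlers()[tryItem.getCatchHandlerIndex()];
                // The original byte offset is still available if it is needed.
                System.out.println("try @" + tryItem.getStartAddress()
                        + " -> handler offset " + handler.getOffset());
            }
        }
    }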
diff --git a/dx/src/com/android/dx/io/DexBuffer.java b/dx/src/com/android/dx/io/DexBuffer.java
index 2420cfd9f..9fbc78cac 100644
--- a/dx/src/com/android/dx/io/DexBuffer.java
+++ b/dx/src/com/android/dx/io/DexBuffer.java
@@ -19,6 +19,8 @@ package com.android.dx.io;
import com.android.dx.dex.DexFormat;
import com.android.dx.dex.SizeOf;
import com.android.dx.dex.TableOfContents;
+import com.android.dx.io.Code.CatchHandler;
+import com.android.dx.io.Code.Try;
import com.android.dx.merge.TypeList;
import com.android.dx.util.ByteInput;
import com.android.dx.util.ByteOutput;
@@ -362,6 +364,10 @@ public final class DexBuffer {
return Leb128Utils.readUnsignedLeb128(this);
}
+ public int readUleb128p1() {
+ return Leb128Utils.readUnsignedLeb128(this) - 1;
+ }
+
public int readSleb128() {
return Leb128Utils.readSignedLeb128(this);
}
@@ -439,31 +445,64 @@ public final class DexBuffer {
int debugInfoOffset = readInt();
int instructionsSize = readInt();
short[] instructions = readShortArray(instructionsSize);
- Code.Try[] tries = new Code.Try[triesSize];
- Code.CatchHandler[] catchHandlers = new Code.CatchHandler[0];
+ Try[] tries;
+ CatchHandler[] catchHandlers;
if (triesSize > 0) {
if (instructions.length % 2 == 1) {
readShort(); // padding
}
- for (int i = 0; i < triesSize; i++) {
- int startAddress = readInt();
- int instructionCount = readUnsignedShort();
- int handlerOffset = readUnsignedShort();
- tries[i] = new Code.Try(startAddress, instructionCount, handlerOffset);
- }
-
- int catchHandlersSize = readUleb128();
- catchHandlers = new Code.CatchHandler[catchHandlersSize];
- for (int i = 0; i < catchHandlersSize; i++) {
- catchHandlers[i] = readCatchHandler();
- }
+ /*
+ * We can't read the tries until we've read the catch handlers.
+ * Unfortunately they're in the opposite order in the dex file
+ * so we need to read them out-of-order.
+ */
+ Section triesSection = open(position);
+ skip(triesSize * SizeOf.TRY_ITEM);
+ catchHandlers = readCatchHandlers();
+ tries = triesSection.readTries(triesSize, catchHandlers);
+ } else {
+ tries = new Try[0];
+ catchHandlers = new CatchHandler[0];
}
return new Code(registersSize, insSize, outsSize, debugInfoOffset, instructions,
tries, catchHandlers);
}
- private Code.CatchHandler readCatchHandler() {
+ private CatchHandler[] readCatchHandlers() {
+ int baseOffset = position;
+ int catchHandlersSize = readUleb128();
+ CatchHandler[] result = new CatchHandler[catchHandlersSize];
+ for (int i = 0; i < catchHandlersSize; i++) {
+ int offset = position - baseOffset;
+ result[i] = readCatchHandler(offset);
+ }
+ return result;
+ }
+
+ private Try[] readTries(int triesSize, CatchHandler[] catchHandlers) {
+ Try[] result = new Try[triesSize];
+ for (int i = 0; i < triesSize; i++) {
+ int startAddress = readInt();
+ int instructionCount = readUnsignedShort();
+ int handlerOffset = readUnsignedShort();
+ int catchHandlerIndex = findCatchHandlerIndex(catchHandlers, handlerOffset);
+ result[i] = new Try(startAddress, instructionCount, catchHandlerIndex);
+ }
+ return result;
+ }
+
+ private int findCatchHandlerIndex(CatchHandler[] catchHandlers, int offset) {
+ for (int i = 0; i < catchHandlers.length; i++) {
+ CatchHandler catchHandler = catchHandlers[i];
+ if (catchHandler.getOffset() == offset) {
+ return i;
+ }
+ }
+ throw new IllegalArgumentException();
+ }
+
+ private CatchHandler readCatchHandler(int offset) {
int size = readSleb128();
int handlersCount = Math.abs(size);
int[] typeIndexes = new int[handlersCount];
@@ -473,7 +512,7 @@ public final class DexBuffer {
addresses[i] = readUleb128();
}
int catchAllAddress = size <= 0 ? readUleb128() : -1;
- return new Code.CatchHandler(typeIndexes, addresses, catchAllAddress);
+ return new CatchHandler(typeIndexes, addresses, catchAllAddress, offset);
}
private ClassData readClassData() {
@@ -544,6 +583,14 @@ public final class DexBuffer {
}
}
+ public void skip(int count) {
+ if (count < 0) {
+ throw new IllegalArgumentException();
+ }
+ ensureCapacity(count);
+ position += count;
+ }
+
/**
* Writes 0x00 until the position is aligned to a multiple of 4.
*/
@@ -611,6 +658,10 @@ public final class DexBuffer {
}
}
+ public void writeUleb128p1(int i) {
+ writeUleb128(i + 1);
+ }
+
public void writeSleb128(int i) {
try {
Leb128Utils.writeSignedLeb128(this, i);
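The uleb128p1 helpers follow the dex convention of storing value + 1, so that -1 (NO_INDEX, e.g. an unnamed debug-info parameter) still encodes as an unsigned LEB128. A round-trip sketch (illustration only; the real methods delegate to Leb128Utils):

    class Uleb128p1Sketch {
        // What writeUleb128p1 hands to writeUleb128:
        static int stored(int value) { return value + 1; }
        // What readUleb128p1 does with the value readUleb128 returns:
        static int decoded(int stored) { return stored - 1; }
        // Example: -1 round-trips through the single uleb128 byte 0x00,
        // since stored(-1) == 0 and decoded(0) == -1.
    }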
diff --git a/dx/src/com/android/dx/merge/DexMerger.java b/dx/src/com/android/dx/merge/DexMerger.java
index cd7f07ffa..fc4d14513 100644
--- a/dx/src/com/android/dx/merge/DexMerger.java
+++ b/dx/src/com/android/dx/merge/DexMerger.java
@@ -33,9 +33,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
-import java.util.HashSet;
import java.util.List;
-import java.util.Set;
/**
* Combine two dex files into one.
@@ -792,11 +790,16 @@ public final class DexMerger {
codeOut.writeUnsignedShort(code.getOutsSize());
Code.Try[] tries = code.getTries();
+ Code.CatchHandler[] catchHandlers = code.getCatchHandlers();
codeOut.writeUnsignedShort(tries.length);
- // TODO: retain debug info
- // code.getDebugInfoOffset();
- codeOut.writeInt(0);
+ int debugInfoOffset = code.getDebugInfoOffset();
+ if (debugInfoOffset != 0) {
+ codeOut.writeInt(debugInfoOut.getPosition());
+ transformDebugInfoItem(in.open(debugInfoOffset), indexMap);
+ } else {
+ codeOut.writeInt(0);
+ }
short[] instructions = code.getInstructions();
InstructionTransformer transformer = (in == dexA)
@@ -810,15 +813,120 @@ public final class DexMerger {
if (newInstructions.length % 2 == 1) {
codeOut.writeShort((short) 0); // padding
}
- for (Code.Try tryItem : tries) {
- codeOut.writeInt(tryItem.getStartAddress());
- codeOut.writeUnsignedShort(tryItem.getInstructionCount());
- codeOut.writeUnsignedShort(tryItem.getHandlerOffset());
- }
- Code.CatchHandler[] catchHandlers = code.getCatchHandlers();
- codeOut.writeUleb128(catchHandlers.length);
- for (Code.CatchHandler catchHandler : catchHandlers) {
- transformEncodedCatchHandler(catchHandler, indexMap);
+
+ /*
+ * We can't write the tries until we've written the catch handlers.
+ * Unfortunately they're in the opposite order in the dex file so we
+ * need to transform them out-of-order.
+ */
+ DexBuffer.Section triesSection = dexOut.open(codeOut.getPosition());
+ codeOut.skip(tries.length * SizeOf.TRY_ITEM);
+ int[] offsets = transformCatchHandlers(indexMap, catchHandlers);
+ transformTries(triesSection, tries, offsets);
+ }
+ }
+
+ /**
+ * Writes the catch handlers to {@code codeOut} and returns their indices.
+ */
+ private int[] transformCatchHandlers(IndexMap indexMap, Code.CatchHandler[] catchHandlers) {
+ int baseOffset = codeOut.getPosition();
+ codeOut.writeUleb128(catchHandlers.length);
+ int[] offsets = new int[catchHandlers.length];
+ for (int i = 0; i < catchHandlers.length; i++) {
+ offsets[i] = codeOut.getPosition() - baseOffset;
+ transformEncodedCatchHandler(catchHandlers[i], indexMap);
+ }
+ return offsets;
+ }
+
+ private void transformTries(DexBuffer.Section out, Code.Try[] tries,
+ int[] catchHandlerOffsets) {
+ for (Code.Try tryItem : tries) {
+ out.writeInt(tryItem.getStartAddress());
+ out.writeUnsignedShort(tryItem.getInstructionCount());
+ out.writeUnsignedShort(catchHandlerOffsets[tryItem.getCatchHandlerIndex()]);
+ }
+ }
+
+ private static final byte DBG_END_SEQUENCE = 0x00;
+ private static final byte DBG_ADVANCE_PC = 0x01;
+ private static final byte DBG_ADVANCE_LINE = 0x02;
+ private static final byte DBG_START_LOCAL = 0x03;
+ private static final byte DBG_START_LOCAL_EXTENDED = 0x04;
+ private static final byte DBG_END_LOCAL = 0x05;
+ private static final byte DBG_RESTART_LOCAL = 0x06;
+ private static final byte DBG_SET_PROLOGUE_END = 0x07;
+ private static final byte DBG_SET_EPILOGUE_BEGIN = 0x08;
+ private static final byte DBG_SET_FILE = 0x09;
+
+ private void transformDebugInfoItem(DexBuffer.Section in, IndexMap indexMap) {
+ contentsOut.debugInfos.size++;
+ int lineStart = in.readUleb128();
+ debugInfoOut.writeUleb128(lineStart);
+
+ int parametersSize = in.readUleb128();
+ debugInfoOut.writeUleb128(parametersSize);
+
+ for (int p = 0; p < parametersSize; p++) {
+ int parameterName = in.readUleb128p1();
+ debugInfoOut.writeUleb128p1(indexMap.adjustString(parameterName));
+ }
+
+ int addrDiff; // uleb128 address delta.
+ int lineDiff; // sleb128 line delta.
+ int registerNum; // uleb128 register number.
+ int nameIndex; // uleb128p1 string index. Needs indexMap adjustment.
+ int typeIndex; // uleb128p1 type index. Needs indexMap adjustment.
+ int sigIndex; // uleb128p1 string index. Needs indexMap adjustment.
+
+ while (true) {
+ int opcode = in.readByte();
+ debugInfoOut.writeByte(opcode);
+
+ switch (opcode) {
+ case DBG_END_SEQUENCE:
+ return;
+
+ case DBG_ADVANCE_PC:
+ addrDiff = in.readUleb128();
+ debugInfoOut.writeUleb128(addrDiff);
+ break;
+
+ case DBG_ADVANCE_LINE:
+ lineDiff = in.readSleb128();
+ debugInfoOut.writeSleb128(lineDiff);
+ break;
+
+ case DBG_START_LOCAL:
+ case DBG_START_LOCAL_EXTENDED:
+ registerNum = in.readUleb128();
+ debugInfoOut.writeUleb128(registerNum);
+ nameIndex = in.readUleb128p1();
+ debugInfoOut.writeUleb128p1(indexMap.adjustString(nameIndex));
+ typeIndex = in.readUleb128p1();
+ debugInfoOut.writeUleb128p1(indexMap.adjustType(typeIndex));
+ if (opcode == DBG_START_LOCAL_EXTENDED) {
+ sigIndex = in.readUleb128p1();
+ debugInfoOut.writeUleb128p1(indexMap.adjustString(sigIndex));
+ }
+ break;
+
+ case DBG_END_LOCAL:
+ case DBG_RESTART_LOCAL:
+ registerNum = in.readUleb128();
+ debugInfoOut.writeUleb128(registerNum);
+ break;
+
+ case DBG_SET_FILE:
+ nameIndex = in.readUleb128p1();
+ debugInfoOut.writeUleb128p1(indexMap.adjustString(nameIndex));
+ break;
+
+ case DBG_SET_PROLOGUE_END:
+ case DBG_SET_EPILOGUE_BEGIN:
+ default:
+ break;
}
}
}
@@ -910,7 +1018,6 @@ public final class DexMerger {
mapList = SizeOf.UINT + (contents.sections.length * SizeOf.MAP_ITEM);
typeList += contents.typeLists.byteCount;
stringData += contents.stringDatas.byteCount;
- debugInfo += contents.debugInfos.byteCount;
annotationsDirectory += contents.annotationsDirectories.byteCount;
annotationsSet += contents.annotationSets.byteCount;
annotationsSetRefList += contents.annotationSetRefLists.byteCount;
@@ -920,6 +1027,7 @@ public final class DexMerger {
classData += contents.classDatas.byteCount;
encodedArray += contents.encodedArrays.byteCount;
annotation += contents.annotations.byteCount;
+ debugInfo += contents.debugInfos.byteCount;
} else {
// at most 1/4 of the bytes in a code section are uleb/sleb
code += (int) Math.ceil(contents.codes.byteCount * 1.25);
@@ -927,8 +1035,10 @@ public final class DexMerger {
classData += (int) Math.ceil(contents.classDatas.byteCount * 1.34);
// all of the bytes in an encoding arrays section may be uleb/sleb
encodedArray += contents.encodedArrays.byteCount * 2;
- // at most 1/3 of the bytes in an encoding arrays section are uleb/sleb
- annotation += (int) Math.ceil(contents.annotations.byteCount * 1.34);
+ // all of the bytes in an annotations section may be uleb/sleb
+ annotation += (int) Math.ceil(contents.annotations.byteCount * 2);
+ // all of the bytes in a debug info section may be uleb/sleb
+ debugInfo += contents.debugInfos.byteCount * 2;
}
typeList = DexBuffer.fourByteAlign(typeList);
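The revised size estimates follow a simple worst-case model: if a fraction f of a section's bytes are uleb/sleb values and index remapping can grow each such byte by at most one byte, the merged section needs at most (1 + f) times the original space. A sketch of that arithmetic (illustration, not patch code):

    class MergeSizeEstimate {
        static int worstCase(int byteCount, double ulebFraction) {
            return (int) Math.ceil(byteCount * (1 + ulebFraction));
        }
        // code sections:  worstCase(n, 0.25) -> ceil(n * 1.25)
        // class_data:     worstCase(n, 0.34) -> ceil(n * 1.34)
        // encoded arrays, annotations, debug info: worstCase(n, 1.0) -> n * 2
    }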
diff --git a/libdex/Android.mk b/libdex/Android.mk
index bb8d03b96..adae003a8 100644
--- a/libdex/Android.mk
+++ b/libdex/Android.mk
@@ -30,7 +30,6 @@ dex_src_files := \
InstrUtils.cpp \
Leb128.cpp \
OptInvocation.cpp \
- sha1.cpp \
SysUtil.cpp \
ZipArchive.cpp
@@ -49,6 +48,10 @@ ifneq ($(SDK_ONLY),true) # SDK_only doesn't need device version
include $(CLEAR_VARS)
#LOCAL_CFLAGS += -UNDEBUG -DDEBUG=1
+ifneq ($(findstring -O3, $(TARGET_GLOBAL_CFLAGS)),)
+# Workaround for https://bugs.launchpad.net/linaro-android/+bug/948255
+LOCAL_CFLAGS += -O2
+endif
LOCAL_SRC_FILES := $(dex_src_files)
LOCAL_C_INCLUDES += $(dex_include_files)
LOCAL_MODULE_TAGS := optional
@@ -64,7 +67,7 @@ endif # !SDK_ONLY
##
##
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(dex_src_files)
+LOCAL_SRC_FILES := $(dex_src_files) sha1.cpp
LOCAL_C_INCLUDES += $(dex_include_files)
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := libdex
diff --git a/libdex/OptInvocation.cpp b/libdex/OptInvocation.cpp
index bac2f247d..7a25c2954 100644
--- a/libdex/OptInvocation.cpp
+++ b/libdex/OptInvocation.cpp
@@ -32,6 +32,8 @@
#include "OptInvocation.h"
#include "DexFile.h"
+#include <cutils/properties.h>
+
static const char* kCacheDirectoryName = "dalvik-cache";
static const char* kClassesDex = "classes.dex";
@@ -50,7 +52,11 @@ char* dexOptGenerateCacheFileName(const char* fileName, const char* subFileName)
char absoluteFile[sizeof(nameBuf)];
const size_t kBufLen = sizeof(nameBuf) - 1;
const char* dataRoot;
+ const char* dexRoot;
+ const char* cacheRoot;
+ const char* systemRoot;
char* cp;
+ char dexoptDataOnly[PROPERTY_VALUE_MAX];
/*
* Get the absolute path of the Jar or DEX file.
@@ -93,10 +99,34 @@ char* dexOptGenerateCacheFileName(const char* fileName, const char* subFileName)
/* Build the name of the cache directory.
*/
+
+ /* load paths from the system environment */
+ cacheRoot = getenv("ANDROID_CACHE");
dataRoot = getenv("ANDROID_DATA");
+ systemRoot = getenv("ANDROID_ROOT");
+
+ /* make sure we didn't get any NULL values */
+ if (cacheRoot == NULL)
+ cacheRoot = "/cache";
+
if (dataRoot == NULL)
dataRoot = "/data";
- snprintf(nameBuf, kBufLen, "%s/%s", dataRoot, kCacheDirectoryName);
+
+ if (systemRoot == NULL)
+ systemRoot = "/system";
+
+ if (dexRoot == NULL)
+ dexRoot = "/data";
+
+ /* Cache anything stored on /system in cacheRoot, everything else in dataRoot */
+ if (!strncmp(absoluteFile, systemRoot, strlen(systemRoot))) {
+ property_get("dalvik.vm.dexopt-data-only", dexoptDataOnly, "");
+ if (strcmp(dexoptDataOnly, "1") != 0) {
+ dexRoot = cacheRoot;
+ }
+ }
+
+ snprintf(nameBuf, kBufLen, "%s/%s", dexRoot, kCacheDirectoryName);
/* Tack on the file name for the actual cache file path.
*/
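The net effect of the new path logic: files that live under the system partition get their optimized dex cached under ANDROID_CACHE, unless dalvik.vm.dexopt-data-only is "1"; everything else stays under ANDROID_DATA. A sketch of that decision (Java form for illustration only; the real code uses getenv(3) and property_get):

    class DexCacheRootSketch {
        static String chooseDexRoot(String absoluteFile, String systemRoot,
                String dataRoot, String cacheRoot, boolean dexoptDataOnly) {
            if (absoluteFile.startsWith(systemRoot) && !dexoptDataOnly) {
                return cacheRoot;   // e.g. /cache/dalvik-cache for /system jars
            }
            return dataRoot;        // e.g. /data/dalvik-cache otherwise
        }
    }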
diff --git a/libdex/sha1.cpp b/libdex/sha1.cpp
index 15a81cca3..aefa2222e 100644
--- a/libdex/sha1.cpp
+++ b/libdex/sha1.cpp
@@ -142,8 +142,8 @@ union CHAR64LONG16 {
};
CHAR64LONG16* block;
#ifdef SHA1HANDSOFF
-static unsigned char workspace[64];
- block = (CHAR64LONG16*)workspace;
+static CHAR64LONG16 workspace;
+ block = &workspace;
memcpy(block, buffer, 64);
#else
block = (CHAR64LONG16*)buffer;
diff --git a/tests/083-jit-regressions/expected.txt b/tests/083-jit-regressions/expected.txt
index 4b9ad5b2d..7f9f14c65 100644
--- a/tests/083-jit-regressions/expected.txt
+++ b/tests/083-jit-regressions/expected.txt
@@ -2,3 +2,5 @@ b2296099 passes
b2302318 passes
b2487514 passes
b5884080 passes
+longDivTest passes
+longModTest passes
diff --git a/tests/083-jit-regressions/src/Main.java b/tests/083-jit-regressions/src/Main.java
index 3b596dbb5..68bfa3732 100644
--- a/tests/083-jit-regressions/src/Main.java
+++ b/tests/083-jit-regressions/src/Main.java
@@ -25,6 +25,7 @@ public class Main {
b2302318Test();
b2487514Test();
b5884080Test();
+ zeroTest();
}
static void b2296099Test() throws Exception {
@@ -111,13 +112,12 @@ public class Main {
int vA = 1;
int l = 0;
- do
- {
+ do {
int k = 0;
do
vA += 1;
while(++k < 100);
- } while(++l < 1000);
+ } while (++l < 1000);
if (vA == 100001) {
System.out.println("b5884080 passes");
}
@@ -126,6 +126,20 @@ public class Main {
" (expecting 100001)");
}
}
+
+ static void zeroTest() throws Exception {
+ ZeroTests zt = new ZeroTests();
+ try {
+ zt.longDivTest();
+ } catch (Throwable th) {
+ th.printStackTrace();
+ }
+ try {
+ zt.longModTest();
+ } catch (Throwable th) {
+ th.printStackTrace();
+ }
+ }
}
class SpinThread extends Thread {
diff --git a/tests/083-jit-regressions/src/ZeroTests.java b/tests/083-jit-regressions/src/ZeroTests.java
new file mode 100644
index 000000000..57ca151c0
--- /dev/null
+++ b/tests/083-jit-regressions/src/ZeroTests.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+
+/**
+ * Tests long division by zero for both / and %.
+ */
+public class ZeroTests {
+ public void longDivTest() throws Exception {
+ longTest("longDivTest", true);
+ }
+
+ public void longModTest() throws Exception {
+ longTest("longModTest", false);
+ }
+
+ private static void longTest(String name, boolean divide) throws Exception {
+ // Warm up JIT.
+ for (int i = 0; i < 10000; ++i) {
+ try {
+ // We won't JIT code that hasn't completed successfully, so rhs can't be 0 here!
+ if (divide) {
+ longDiv(1, 1);
+ } else {
+ longMod(1, 1);
+ }
+ } catch (ArithmeticException expected) {
+ throw new AssertionError(name + " threw during warmup");
+ }
+ }
+
+ // Boom?
+ int catchCount = 0;
+ for (int i = 0; i < 10000; ++i) {
+ try {
+ if (divide) {
+ longDiv(1, 0);
+ } else {
+ longMod(1, 0);
+ }
+ throw new AssertionError(name + " failed to throw: " + i);
+ } catch (ArithmeticException expected) {
+ ++catchCount;
+ }
+ }
+ if (catchCount != 10000) throw new AssertionError(name + " failed: " + catchCount);
+
+ System.out.println(name + " passes");
+ }
+
+ private static long longDiv(long lhs, long rhs) {
+ return lhs / rhs;
+ }
+
+ private static long longMod(long lhs, long rhs) {
+ return lhs % rhs;
+ }
+}
diff --git a/tests/etc/host-run-test-jar b/tests/etc/host-run-test-jar
index d3c0fd5ed..ab7278e87 100755
--- a/tests/etc/host-run-test-jar
+++ b/tests/etc/host-run-test-jar
@@ -78,7 +78,7 @@ while true; do
done
if [ "x$INTERP" = "x" ]; then
- INTERP="fast"
+ INTERP="fast" # TODO: change this to "jit" when the x86 jit is merged.
msg "Using fast interpreter by default"
fi
diff --git a/tests/etc/push-and-run-test-jar b/tests/etc/push-and-run-test-jar
index e2fde428e..b64f6a664 100755
--- a/tests/etc/push-and-run-test-jar
+++ b/tests/etc/push-and-run-test-jar
@@ -78,8 +78,8 @@ done
if [ "$ZYGOTE" = "n" ]; then
if [ "x$INTERP" = "x" ]; then
- INTERP="fast"
- msg "Using fast interpreter by default"
+ INTERP="jit"
+ msg "Using jit by default"
fi
if [ "$OPTIMIZE" = "y" ]; then
diff --git a/tests/run-test b/tests/run-test
index fb758d779..406e42484 100755
--- a/tests/run-test
+++ b/tests/run-test
@@ -146,8 +146,8 @@ if [ "$usage" = "yes" ]; then
echo ' Omitting the test name or specifying "-" will use the' \
"current directory."
echo " Runtime Options:"
- echo " --fast Use the fast interpreter (the default)."
- echo " --jit Use the jit."
+ echo " --fast Use the fast interpreter."
+ echo " --jit Use the jit (the default)."
echo " --portable Use the portable interpreter."
echo " --debug Wait for a debugger to attach."
#echo " --gdb Run under gdb; incompatible with some tests."
diff --git a/vm/Init.cpp b/vm/Init.cpp
index 48cc6c1c5..7254af9f2 100644
--- a/vm/Init.cpp
+++ b/vm/Init.cpp
@@ -1165,6 +1165,12 @@ static void blockSignals()
cc = sigaction(SIGBUS, &sa, NULL);
assert(cc == 0);
}
+#ifdef NDEBUG
+ // assert() is defined to nothing - resulting in
+ // cc: variable defined but not used (which breaks
+ // the build if -Werror is on)
+ (void)cc;
+#endif
}
class ScopedShutdown {
@@ -1465,10 +1471,12 @@ static bool initZygote()
*/
bool dvmInitAfterZygote()
{
+#ifndef LOG_NDEBUG
u8 startHeap, startQuit, startJdwp;
u8 endHeap, endQuit, endJdwp;
startHeap = dvmGetRelativeTimeUsec();
+#endif
/*
* Post-zygote heap initialization, including starting
@@ -1477,8 +1485,10 @@ bool dvmInitAfterZygote()
if (!dvmGcStartupAfterZygote())
return false;
+#ifndef LOG_NDEBUG
endHeap = dvmGetRelativeTimeUsec();
startQuit = dvmGetRelativeTimeUsec();
+#endif
/* start signal catcher thread that dumps stacks on SIGQUIT */
if (!gDvm.reduceSignals && !gDvm.noQuitHandler) {
@@ -1492,8 +1502,10 @@ bool dvmInitAfterZygote()
return false;
}
+#ifndef LOG_NDEBUG
endQuit = dvmGetRelativeTimeUsec();
startJdwp = dvmGetRelativeTimeUsec();
+#endif
/*
* Start JDWP thread. If the command-line debugger flags specified
@@ -1504,7 +1516,9 @@ bool dvmInitAfterZygote()
ALOGD("JDWP init failed; continuing anyway");
}
+#ifndef LOG_NDEBUG
endJdwp = dvmGetRelativeTimeUsec();
+#endif
ALOGV("thread-start heap=%d quit=%d jdwp=%d total=%d usec",
(int)(endHeap-startHeap), (int)(endQuit-startQuit),
diff --git a/vm/InlineNative.cpp b/vm/InlineNative.cpp
index 80d522a39..00c1e9554 100644
--- a/vm/InlineNative.cpp
+++ b/vm/InlineNative.cpp
@@ -732,51 +732,41 @@ const InlineOperation gDvmInlineOpsTable[] = {
"Lorg/apache/harmony/dalvik/NativeTestTarget;",
"emptyInlineMethod", "()V" },
- { javaLangString_charAt,
- "Ljava/lang/String;", "charAt", "(I)C" },
- { javaLangString_compareTo,
- "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I" },
- { javaLangString_equals,
- "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z" },
- { javaLangString_fastIndexOf_II,
- "Ljava/lang/String;", "fastIndexOf", "(II)I" },
- { javaLangString_isEmpty,
- "Ljava/lang/String;", "isEmpty", "()Z" },
- { javaLangString_length,
- "Ljava/lang/String;", "length", "()I" },
-
- { javaLangMath_abs_int,
- "Ljava/lang/Math;", "abs", "(I)I" },
- { javaLangMath_abs_long,
- "Ljava/lang/Math;", "abs", "(J)J" },
- { javaLangMath_abs_float,
- "Ljava/lang/Math;", "abs", "(F)F" },
- { javaLangMath_abs_double,
- "Ljava/lang/Math;", "abs", "(D)D" },
- { javaLangMath_min_int,
- "Ljava/lang/Math;", "min", "(II)I" },
- { javaLangMath_max_int,
- "Ljava/lang/Math;", "max", "(II)I" },
- { javaLangMath_sqrt,
- "Ljava/lang/Math;", "sqrt", "(D)D" },
- { javaLangMath_cos,
- "Ljava/lang/Math;", "cos", "(D)D" },
- { javaLangMath_sin,
- "Ljava/lang/Math;", "sin", "(D)D" },
-
- { javaLangFloat_floatToIntBits,
- "Ljava/lang/Float;", "floatToIntBits", "(F)I" },
- { javaLangFloat_floatToRawIntBits,
- "Ljava/lang/Float;", "floatToRawIntBits", "(F)I" },
- { javaLangFloat_intBitsToFloat,
- "Ljava/lang/Float;", "intBitsToFloat", "(I)F" },
-
- { javaLangDouble_doubleToLongBits,
- "Ljava/lang/Double;", "doubleToLongBits", "(D)J" },
- { javaLangDouble_doubleToRawLongBits,
- "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J" },
- { javaLangDouble_longBitsToDouble,
- "Ljava/lang/Double;", "longBitsToDouble", "(J)D" },
+ { javaLangString_charAt, "Ljava/lang/String;", "charAt", "(I)C" },
+ { javaLangString_compareTo, "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I" },
+ { javaLangString_equals, "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z" },
+ { javaLangString_fastIndexOf_II, "Ljava/lang/String;", "fastIndexOf", "(II)I" },
+ { javaLangString_isEmpty, "Ljava/lang/String;", "isEmpty", "()Z" },
+ { javaLangString_length, "Ljava/lang/String;", "length", "()I" },
+
+ { javaLangMath_abs_int, "Ljava/lang/Math;", "abs", "(I)I" },
+ { javaLangMath_abs_long, "Ljava/lang/Math;", "abs", "(J)J" },
+ { javaLangMath_abs_float, "Ljava/lang/Math;", "abs", "(F)F" },
+ { javaLangMath_abs_double, "Ljava/lang/Math;", "abs", "(D)D" },
+ { javaLangMath_min_int, "Ljava/lang/Math;", "min", "(II)I" },
+ { javaLangMath_max_int, "Ljava/lang/Math;", "max", "(II)I" },
+ { javaLangMath_sqrt, "Ljava/lang/Math;", "sqrt", "(D)D" },
+ { javaLangMath_cos, "Ljava/lang/Math;", "cos", "(D)D" },
+ { javaLangMath_sin, "Ljava/lang/Math;", "sin", "(D)D" },
+
+ { javaLangFloat_floatToIntBits, "Ljava/lang/Float;", "floatToIntBits", "(F)I" },
+ { javaLangFloat_floatToRawIntBits, "Ljava/lang/Float;", "floatToRawIntBits", "(F)I" },
+ { javaLangFloat_intBitsToFloat, "Ljava/lang/Float;", "intBitsToFloat", "(I)F" },
+
+ { javaLangDouble_doubleToLongBits, "Ljava/lang/Double;", "doubleToLongBits", "(D)J" },
+ { javaLangDouble_doubleToRawLongBits, "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J" },
+ { javaLangDouble_longBitsToDouble, "Ljava/lang/Double;", "longBitsToDouble", "(J)D" },
+
+ // These are implemented exactly the same in Math and StrictMath,
+ // so we can make the StrictMath calls fast too. Note that this
+ // isn't true in general!
+ { javaLangMath_abs_int, "Ljava/lang/StrictMath;", "abs", "(I)I" },
+ { javaLangMath_abs_long, "Ljava/lang/StrictMath;", "abs", "(J)J" },
+ { javaLangMath_abs_float, "Ljava/lang/StrictMath;", "abs", "(F)F" },
+ { javaLangMath_abs_double, "Ljava/lang/StrictMath;", "abs", "(D)D" },
+ { javaLangMath_min_int, "Ljava/lang/StrictMath;", "min", "(II)I" },
+ { javaLangMath_max_int, "Ljava/lang/StrictMath;", "max", "(II)I" },
+ { javaLangMath_sqrt, "Ljava/lang/StrictMath;", "sqrt", "(D)D" },
};
/*
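Sharing the Math intrinsics with StrictMath is safe for exactly these entries because abs, min, max, and sqrt are required to return bit-identical results in both classes; sin and cos only carry a 1-ulp accuracy requirement, so they are deliberately not shared. A small illustration (plain Java, not patch code):

    class StrictMathSharing {
        static void check() {
            // sqrt is correctly rounded in both classes, so one intrinsic serves both:
            assert Double.doubleToLongBits(Math.sqrt(2.0))
                    == Double.doubleToLongBits(StrictMath.sqrt(2.0));
            // sin may legitimately differ between Math and StrictMath on some
            // inputs, which is why no StrictMath entry is added for it.
            double fast = Math.sin(1e9);
            double strict = StrictMath.sin(1e9);
            // fast == strict is not guaranteed.
        }
    }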
diff --git a/vm/InlineNative.h b/vm/InlineNative.h
index 101ddd192..fe14f8bf8 100644
--- a/vm/InlineNative.h
+++ b/vm/InlineNative.h
@@ -53,7 +53,7 @@ struct InlineOperation {
};
/*
- * Must be kept in sync w/ gDvmInlineOpsTable in InlineNative.c
+ * Must be kept in sync w/ gDvmInlineOpsTable in InlineNative.cpp
*
* You should also add a test to libcore's IntrinsicTest.
*/
@@ -80,6 +80,13 @@ enum NativeInlineOps {
INLINE_DOUBLE_TO_LONG_BITS = 19,
INLINE_DOUBLE_TO_RAW_LONG_BITS = 20,
INLINE_LONG_BITS_TO_DOUBLE = 21,
+ INLINE_STRICT_MATH_ABS_INT = 22,
+ INLINE_STRICT_MATH_ABS_LONG = 23,
+ INLINE_STRICT_MATH_ABS_FLOAT = 24,
+ INLINE_STRICT_MATH_ABS_DOUBLE = 25,
+ INLINE_STRICT_MATH_MIN_INT = 26,
+ INLINE_STRICT_MATH_MAX_INT = 27,
+ INLINE_STRICT_MATH_SQRT = 28,
};
/*
diff --git a/vm/LinearAlloc.cpp b/vm/LinearAlloc.cpp
index 359893f64..213ba3cf2 100644
--- a/vm/LinearAlloc.cpp
+++ b/vm/LinearAlloc.cpp
@@ -524,6 +524,12 @@ static void updatePages(Object* classLoader, void* mem, int direction)
}
dvmUnlockMutex(&pHdr->lock);
+#ifdef NDEBUG
+ // cc is used only in assert() statements -> not used
+ // in NDEBUG mode -> variable defined but not used
+ // warning (or error with -Werror)
+ (void)cc;
+#endif
}
/*
diff --git a/vm/Misc.cpp b/vm/Misc.cpp
index 1299c22ce..f07684878 100644
--- a/vm/Misc.cpp
+++ b/vm/Misc.cpp
@@ -512,7 +512,12 @@ u8 dvmGetOtherThreadCpuTimeNsec(pthread_t thread)
*/
bool dvmIterativeSleep(int iteration, int maxTotalSleep, u8 relStartTime)
{
- const int minSleep = 10000;
+ /*
+ * Minimum sleep is one millisecond, it is important to keep this value
+ * low to ensure short GC pauses since dvmSuspendAllThreads() uses this
+ * function.
+ */
+ const int minSleep = 1000;
u8 curTime;
int curDelay;
diff --git a/vm/StdioConverter.cpp b/vm/StdioConverter.cpp
index f420c4d13..d261687a8 100644
--- a/vm/StdioConverter.cpp
+++ b/vm/StdioConverter.cpp
@@ -196,6 +196,12 @@ static void* stdioConverterThreadStart(void* arg)
/* change back for shutdown sequence */
dvmChangeStatus(NULL, THREAD_RUNNING);
+#ifdef NDEBUG
+ // cc is used only in assert() statements -> not used in NDEBUG
+ // mode - causing variable defined but not used warning,
+ // breaking the build with -Werror
+ (void)cc;
+#endif
return NULL;
}
diff --git a/vm/Sync.cpp b/vm/Sync.cpp
index 8a3803eb4..d1f3ba843 100644
--- a/vm/Sync.cpp
+++ b/vm/Sync.cpp
@@ -757,6 +757,12 @@ done:
dvmThrowInterruptedException(NULL);
}
}
+#ifdef NDEBUG
+ // ret is used only in assert() statements ==> not used in
+ // NDEBUG builds at all, causing variable defined but not
+ // used warning, breaking the build with -Werror
+ (void)ret;
+#endif
}
/*
diff --git a/vm/Thread.cpp b/vm/Thread.cpp
index d82f15afe..36cba53b7 100644
--- a/vm/Thread.cpp
+++ b/vm/Thread.cpp
@@ -2153,6 +2153,10 @@ void dvmDetachCurrentThread()
// cond var guarded by threadListLock, which we already hold
cc = pthread_cond_signal(&gDvm.vmExitCond);
assert(cc == 0);
+#ifdef NDEBUG
+ // not used -> variable defined but not used warning
+ (void)cc;
+#endif
}
}
@@ -2710,6 +2714,10 @@ void dvmResumeAllThreads(SuspendCause why)
lockThreadSuspendCount();
cc = pthread_cond_broadcast(&gDvm.threadSuspendCountCond);
assert(cc == 0);
+#ifdef NDEBUG
+ // not used -> variable defined but not used warning
+ (void)cc;
+#endif
unlockThreadSuspendCount();
LOG_THREAD("threadid=%d: ResumeAll complete", self->threadId);
@@ -2759,6 +2767,10 @@ void dvmUndoDebuggerSuspensions()
lockThreadSuspendCount();
cc = pthread_cond_broadcast(&gDvm.threadSuspendCountCond);
assert(cc == 0);
+#ifdef NDEBUG
+ // not used -> variable defined but not used warning
+ (void)cc;
+#endif
unlockThreadSuspendCount();
unlockThreadSuspend();
diff --git a/vm/alloc/Alloc.h b/vm/alloc/Alloc.h
index efee1bde3..51eeeb9fe 100644
--- a/vm/alloc/Alloc.h
+++ b/vm/alloc/Alloc.h
@@ -117,6 +117,26 @@ float dvmGetTargetHeapUtilization(void);
void dvmSetTargetHeapUtilization(float newTarget);
/*
+ * Sets HEAP_IDEAL_FREE
+ */
+void dvmSetTargetHeapIdealFree(size_t size);
+
+/*
+ * Gets HEAP_IDEAL_FREE
+ */
+int dvmGetTargetHeapIdealFree();
+
+/*
+ * Sets CONCURRENT_START
+ */
+void dvmSetTargetHeapConcurrentStart(size_t size);
+
+/*
+ * Gets CONCURRENT_START
+ */
+int dvmGetTargetHeapConcurrentStart();
+
+/*
* Initiate garbage collection.
*
* This usually happens automatically, but can also be caused by
diff --git a/vm/alloc/Heap.cpp b/vm/alloc/Heap.cpp
index 1d06dfec0..65b33105d 100644
--- a/vm/alloc/Heap.cpp
+++ b/vm/alloc/Heap.cpp
@@ -31,6 +31,11 @@
#include <limits.h>
#include <errno.h>
+#ifdef LOG_NDDEBUG
+#undef LOG_NDDEBUG
+#define LOG_NDDEBUG 0
+#endif
+
static const GcSpec kGcForMallocSpec = {
true, /* isPartial */
false, /* isConcurrent */
@@ -215,17 +220,13 @@ static void *tryMalloc(size_t size)
* lock, wait for the GC to complete, and retrying allocating.
*/
dvmWaitForConcurrentGcToComplete();
- ptr = dvmHeapSourceAlloc(size);
- if (ptr != NULL) {
- return ptr;
- }
+ } else {
+ /*
+ * Try a foreground GC since a concurrent GC is not currently running.
+ */
+ gcForMalloc(false);
}
- /*
- * Another failure. Our thread was starved or there may be too
- * many live objects. Try a foreground GC. This will have no
- * effect if the concurrent GC is already running.
- */
- gcForMalloc(false);
+
ptr = dvmHeapSourceAlloc(size);
if (ptr != NULL) {
return ptr;
@@ -713,8 +714,9 @@ void dvmCollectGarbageInternal(const GcSpec* spec)
* suspend when the GC thread calls dvmUnlockHeap before dvmResumeAllThreads,
* but there's no risk of deadlock.)
*/
-void dvmWaitForConcurrentGcToComplete()
+bool dvmWaitForConcurrentGcToComplete()
{
+ bool waited = gDvm.gcHeap->gcRunning;
Thread *self = dvmThreadSelf();
assert(self != NULL);
u4 start = dvmGetRelativeTimeMsec();
@@ -724,5 +726,8 @@ void dvmWaitForConcurrentGcToComplete()
dvmChangeStatus(self, oldStatus);
}
u4 end = dvmGetRelativeTimeMsec();
- ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
+ if (end - start > 0) {
+ ALOGD("WAIT_FOR_CONCURRENT_GC blocked %ums", end - start);
+ }
+ return waited;
}
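The restructured allocation path picks exactly one recovery strategy before retrying: wait for an in-flight concurrent GC, or run a foreground GC, but no longer both. A control-flow sketch with hypothetical helper names (illustration only; the real code works on dvmHeapSourceAlloc and gDvm state under the heap lock):

    interface GcOps {
        Object alloc(int size);
        boolean concurrentGcRunning();
        void waitForConcurrentGcToComplete();
        void gcForMalloc(boolean clearSoftRefs);
    }

    class TryMallocSketch {
        static Object tryMalloc(GcOps gc, int size) {
            Object ptr = gc.alloc(size);
            if (ptr != null) return ptr;
            if (gc.concurrentGcRunning()) {
                gc.waitForConcurrentGcToComplete(); // let the running collection finish
            } else {
                gc.gcForMalloc(false);              // nothing in flight: collect in the foreground
            }
            return gc.alloc(size);                  // single retry; caller may still grow the heap
        }
    }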
diff --git a/vm/alloc/Heap.h b/vm/alloc/Heap.h
index 9875951d4..19e48cd04 100644
--- a/vm/alloc/Heap.h
+++ b/vm/alloc/Heap.h
@@ -92,7 +92,7 @@ void dvmCollectGarbageInternal(const GcSpec *spec);
* re-acquires the heap lock. After returning, no garbage collection
* will be in progress and the heap lock will be held by the caller.
*/
-void dvmWaitForConcurrentGcToComplete(void);
+bool dvmWaitForConcurrentGcToComplete(void);
/*
* Returns true iff <obj> points to a valid allocated object.
diff --git a/vm/alloc/HeapSource.cpp b/vm/alloc/HeapSource.cpp
index 281557721..fc5b075a4 100644
--- a/vm/alloc/HeapSource.cpp
+++ b/vm/alloc/HeapSource.cpp
@@ -39,8 +39,9 @@ static void trimHeaps();
#define HEAP_UTILIZATION_MAX 1024
#define DEFAULT_HEAP_UTILIZATION 512 // Range 1..HEAP_UTILIZATION_MAX
-#define HEAP_IDEAL_FREE (2 * 1024 * 1024)
-#define HEAP_MIN_FREE (HEAP_IDEAL_FREE / 4)
+#define HEAP_IDEAL_FREE_DEFAULT (7.2 * 1024 * 1024)
+static unsigned int heapIdeaFree = HEAP_IDEAL_FREE_DEFAULT;
+#define HEAP_MIN_FREE ((heapIdeaFree) / 4)
/* How long to wait after a GC before performing a heap trim
* operation to reclaim unused pages.
@@ -50,12 +51,13 @@ static void trimHeaps();
/* Start a concurrent collection when free memory falls under this
* many bytes.
*/
-#define CONCURRENT_START (128 << 10)
+#define CONCURRENT_START_DEFAULT (128 << 10)
+static unsigned int concurrentStart= CONCURRENT_START_DEFAULT;
/* The next GC will not be concurrent when free memory after a GC is
* under this many bytes.
*/
-#define CONCURRENT_MIN_FREE (CONCURRENT_START + (128 << 10))
+#define CONCURRENT_MIN_FREE ((concurrentStart) + (128 << 10))
#define HS_BOILERPLATE() \
do { \
@@ -371,11 +373,12 @@ static bool addNewHeap(HeapSource *hs)
return false;
}
+ size_t startSize = gDvm.heapStartingSize;
heap.maximumSize = hs->growthLimit - overhead;
- heap.concurrentStartBytes = HEAP_MIN_FREE - CONCURRENT_START;
+ heap.concurrentStartBytes = startSize - concurrentStart;
heap.base = base;
heap.limit = heap.base + heap.maximumSize;
- heap.msp = createMspace(base, HEAP_MIN_FREE, hs->maximumSize - overhead);
+ heap.msp = createMspace(base, startSize * 2, hs->maximumSize - overhead);
if (heap.msp == NULL) {
return false;
}
@@ -591,6 +594,12 @@ fail:
bool dvmHeapSourceStartupAfterZygote()
{
+ //For each new application forked, we need to reset softLimit and
+ //concurrentStartBytes to the correct expected values, not the ones
+ //inherited from Zygote
+ HeapSource *hs = gHs;
+ hs->softLimit=SIZE_MAX;
+ hs->heaps[0].concurrentStartBytes = mspace_footprint(hs->heaps[0].msp) - concurrentStart;
return gDvm.concurrentMarkSweep ? gcDaemonStartup() : true;
}
@@ -1173,7 +1182,10 @@ static void snapIdealFootprint()
{
HS_BOILERPLATE();
- setIdealFootprint(getSoftFootprint(true));
+ /* Give IDEAL_FREE extra amount of room even for the
+ * snapIdealFootprint case
+ */
+ setIdealFootprint(getSoftFootprint(true) + heapIdeaFree);
}
/*
@@ -1215,6 +1227,49 @@ void dvmSetTargetHeapUtilization(float newTarget)
}
/*
+ * Sets heapIdeaFree
+ */
+void dvmSetTargetHeapIdealFree(size_t size)
+{
+
+ HS_BOILERPLATE();
+ heapIdeaFree = size;
+ LOGD_HEAP("dvmSetTargetHeapIdealFree %d", size );
+}
+
+/*
+ * Gets heapIdeaFree
+ */
+int dvmGetTargetHeapIdealFree()
+{
+
+ HS_BOILERPLATE();
+ LOGD_HEAP("dvmGetTargetHeapIdealFree %d", heapIdeaFree );
+ return heapIdeaFree;
+}
+
+/*
+ * Sets concurrentStart
+ */
+void dvmSetTargetHeapConcurrentStart(size_t size)
+{
+
+ HS_BOILERPLATE();
+ concurrentStart = size;
+ LOGD_HEAP("dvmSetTargetHeapConcurrentStart %d", size );
+}
+
+/*
+ * Gets concurrentStart
+ */
+int dvmGetTargetHeapConcurrentStart()
+{
+
+ HS_BOILERPLATE();
+ LOGD_HEAP("dvmGetTargetHeapConcurrentStart %d", concurrentStart );
+ return concurrentStart;
+}
+/*
* Given the size of a live set, returns the ideal heap size given
* the current target utilization and MIN/MAX values.
*
@@ -1230,14 +1285,16 @@ static size_t getUtilizationTarget(size_t liveSize, size_t targetUtilization)
/* Cap the amount of free space, though, so we don't end up
* with, e.g., 8MB of free space when the live set size hits 8MB.
*/
- if (targetSize > liveSize + HEAP_IDEAL_FREE) {
- targetSize = liveSize + HEAP_IDEAL_FREE;
+ if (targetSize > liveSize + heapIdeaFree) {
+ targetSize = liveSize + heapIdeaFree;
} else if (targetSize < liveSize + HEAP_MIN_FREE) {
targetSize = liveSize + HEAP_MIN_FREE;
}
return targetSize;
}
+#define min(a, b) ((a)>(b)?(b):(a))
+
/*
* Given the current contents of the active heap, increase the allowed
* heap footprint to match the target utilization ratio. This
@@ -1276,7 +1333,10 @@ void dvmHeapSourceGrowForUtilization()
/* Not enough free memory to allow a concurrent GC. */
heap->concurrentStartBytes = SIZE_MAX;
} else {
- heap->concurrentStartBytes = freeBytes - CONCURRENT_START;
+ //For small footprint, we keep the min percentage to start
+ //concurrent GC; for big footprint, we keep the absolute value
+ //of free to start concurrent GC
+ heap->concurrentStartBytes = freeBytes - min(freeBytes * (float)(0.2), concurrentStart);
}
}
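With the defaults above (ideal free of 7.2 MB, CONCURRENT_START_DEFAULT of 128 KiB), the new trigger keeps a percentage-based margin on small heaps and an absolute margin on large ones. A sketch of the computation (Java form, illustration only):

    class ConcurrentStartSketch {
        // Bytes that may be allocated after a GC before a concurrent collection starts.
        static long concurrentStartBytes(long freeBytes, long concurrentStart) {
            long margin = Math.min((long) (freeBytes * 0.2), concurrentStart);
            return freeBytes - margin;
        }
        // freeBytes = 256 KiB -> margin ~51 KiB (20%), concurrent GC after ~205 KiB allocated
        // freeBytes = 7.2 MB  -> margin 128 KiB (absolute), concurrent GC after ~7.07 MB allocated
    }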
diff --git a/vm/analysis/CodeVerify.cpp b/vm/analysis/CodeVerify.cpp
index 4a0d3d71d..a230590db 100644
--- a/vm/analysis/CodeVerify.cpp
+++ b/vm/analysis/CodeVerify.cpp
@@ -3737,7 +3737,7 @@ static bool doCodeVerification(VerifierData* vdata, RegisterTable* regTable)
if (instr == kPackedSwitchSignature ||
instr == kSparseSwitchSignature ||
instr == kArrayDataSignature ||
- (instr == OP_NOP &&
+ (instr == OP_NOP && (insnIdx + 1 < insnsSize) &&
(meth->insns[insnIdx+1] == kPackedSwitchSignature ||
meth->insns[insnIdx+1] == kSparseSwitchSignature ||
meth->insns[insnIdx+1] == kArrayDataSignature)))
@@ -4318,6 +4318,11 @@ static bool verifyInstruction(const Method* meth, InsnFlags* insnFlags,
valueType = primitiveTypeToRegType(
resClass->elementClass->primitiveType);
assert(valueType != kRegTypeUnknown);
+#ifdef NDEBUG
+ // assert is optimized out, leaving valueType defined but
+ // not used, causing a compiler warning -> error on -Werror
+ (void)valueType;
+#endif
/*
* Now verify if the element width in the table matches the element
diff --git a/vm/analysis/DexPrepare.cpp b/vm/analysis/DexPrepare.cpp
index e8112d543..82e2c0605 100644
--- a/vm/analysis/DexPrepare.cpp
+++ b/vm/analysis/DexPrepare.cpp
@@ -1043,7 +1043,9 @@ static void verifyAndOptimizeClasses(DexFile* pDexFile, bool doVerify,
static void verifyAndOptimizeClass(DexFile* pDexFile, ClassObject* clazz,
const DexClassDef* pClassDef, bool doVerify, bool doOpt)
{
+#ifndef LOG_NDEBUG
const char* classDescriptor;
+#endif
bool verified = false;
if (clazz->pDvmDex->pDexFile != pDexFile) {
@@ -1059,7 +1061,9 @@ static void verifyAndOptimizeClass(DexFile* pDexFile, ClassObject* clazz,
return;
}
+#ifndef LOG_NDEBUG
classDescriptor = dexStringByTypeIdx(pDexFile, pClassDef->classIdx);
+#endif
/*
* First, try to verify it.
diff --git a/vm/analysis/VfyBasicBlock.cpp b/vm/analysis/VfyBasicBlock.cpp
index d6c4b79df..346e4206f 100644
--- a/vm/analysis/VfyBasicBlock.cpp
+++ b/vm/analysis/VfyBasicBlock.cpp
@@ -178,6 +178,11 @@ static bool setPredecessors(VerifierData* vdata, VfyBasicBlock* curBlock,
gotBranch = dvmGetBranchOffset(meth, insnFlags, curIdx,
&branchOffset, &unused);
assert(gotBranch);
+#ifdef NDEBUG
+ // assert is optimized out, leaving gotBranch defined but
+ // not used, causing a compiler warning -> error on -Werror
+ (void)gotBranch;
+#endif
absOffset = curIdx + branchOffset;
assert(absOffset >= 0 && (u4) absOffset < vdata->insnsSize);
diff --git a/vm/compiler/Compiler.cpp b/vm/compiler/Compiler.cpp
index b9b310516..73da6a0b5 100644
--- a/vm/compiler/Compiler.cpp
+++ b/vm/compiler/Compiler.cpp
@@ -138,6 +138,9 @@ bool dvmCompilerWorkEnqueue(const u2 *pc, WorkOrderKind kind, void* info)
gDvmJit.compilerQueueLength++;
cc = pthread_cond_signal(&gDvmJit.compilerQueueActivity);
assert(cc == 0);
+#ifdef NDEBUG
+ (void)cc; // prevent error on -Werror
+#endif
dvmUnlockMutex(&gDvmJit.compilerLock);
return result;
@@ -641,6 +644,9 @@ static void *compilerThreadStart(void *arg)
int cc;
cc = pthread_cond_signal(&gDvmJit.compilerQueueEmpty);
assert(cc == 0);
+#ifdef NDEBUG
+ (void)cc; // prevent bug on -Werror
+#endif
pthread_cond_wait(&gDvmJit.compilerQueueActivity,
&gDvmJit.compilerLock);
continue;
diff --git a/vm/compiler/codegen/arm/CalloutHelper.h b/vm/compiler/codegen/arm/CalloutHelper.h
index 079c5f646..cc4c0ae98 100644
--- a/vm/compiler/codegen/arm/CalloutHelper.h
+++ b/vm/compiler/codegen/arm/CalloutHelper.h
@@ -87,6 +87,13 @@ const Method *dvmJitToPatchPredictedChain(const Method *method,
const ClassObject *clazz);
/*
+ * Switch dispatch offset calculation for OP_PACKED_SWITCH & OP_SPARSE_SWITCH
+ * Used in CodegenDriver.c
+ * static s8 findPackedSwitchIndex(const u2* switchData, int testVal, int pc);
+ * static s8 findSparseSwitchIndex(const u2* switchData, int testVal, int pc);
+ */
+
+/*
* Resolve interface callsites - OP_INVOKE_INTERFACE & OP_INVOKE_INTERFACE_RANGE
*
* Originally declared in mterp/common/FindInterface.h and only comment it here
diff --git a/vm/compiler/codegen/arm/Codegen.h b/vm/compiler/codegen/arm/Codegen.h
index e67f3d8a1..7ec921770 100644
--- a/vm/compiler/codegen/arm/Codegen.h
+++ b/vm/compiler/codegen/arm/Codegen.h
@@ -45,7 +45,7 @@ static bool genArithOpDoublePortable(CompilationUnit *cUnit, MIR *mir,
static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir);
-#if defined(__ARM_ARCH_5__)
+#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_6__)
static void genMonitorPortable(CompilationUnit *cUnit, MIR *mir);
#endif
diff --git a/vm/compiler/codegen/arm/CodegenDriver.cpp b/vm/compiler/codegen/arm/CodegenDriver.cpp
index 40fc96480..1e2c6e9b1 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.cpp
+++ b/vm/compiler/codegen/arm/CodegenDriver.cpp
@@ -670,6 +670,7 @@ static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir,
OpKind firstOp = kOpBkpt;
OpKind secondOp = kOpBkpt;
bool callOut = false;
+ bool checkZero = false;
void *callTgt;
int retReg = r0;
@@ -700,6 +701,7 @@ static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir,
case OP_DIV_LONG_2ADDR:
callOut = true;
retReg = r0;
+ checkZero = true;
callTgt = (void*)__aeabi_ldivmod;
break;
/* NOTE - result is in r2/r3 instead of r0/r1 */
@@ -708,6 +710,7 @@ static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir,
callOut = true;
callTgt = (void*)__aeabi_ldivmod;
retReg = r2;
+ checkZero = true;
break;
case OP_AND_LONG_2ADDR:
case OP_AND_LONG:
@@ -746,9 +749,14 @@ static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir,
} else {
// Adjust return regs in to handle case of rem returning r2/r3
dvmCompilerFlushAllRegs(cUnit); /* Send everything to home location */
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
LOAD_FUNC_ADDR(cUnit, r14lr, (int) callTgt);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ if (checkZero) {
+ int tReg = r12; // Using fixed registers during call sequence
+ opRegRegReg(cUnit, kOpOr, tReg, r2, r3);
+ genRegImmCheck(cUnit, kArmCondEq, tReg, 0, mir->offset, NULL);
+ }
opReg(cUnit, kOpBlx, r14lr);
dvmCompilerClobberCallRegs(cUnit);
if (retReg == r0)
@@ -1382,7 +1390,7 @@ static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir)
opReg(cUnit, kOpBlx, r2);
}
-#if defined(_ARMV5TE) || defined(_ARMV5TE_VFP)
+#if defined(_ARMV5TE) || defined(_ARMV5TE_VFP) || defined(_ARMV6J) || defined(_ARMV6_VFP)
/*
* To prevent a thread in a monitor wait from blocking the Jit from
* resetting the code cache, heavyweight monitor lock will not
@@ -2223,7 +2231,9 @@ static bool handleEasyMultiply(CompilationUnit *cUnit,
// Can we simplify this multiplication?
bool powerOfTwo = false;
bool popCountLE2 = false;
- bool powerOfTwoMinusOne = false;
+#ifndef NDEBUG
+ bool powerOfTwoMinusOne = false; // used only in assert
+#endif
if (lit < 2) {
// Avoid special cases.
return false;
@@ -2232,7 +2242,9 @@ static bool handleEasyMultiply(CompilationUnit *cUnit,
} else if (isPopCountLE2(lit)) {
popCountLE2 = true;
} else if (isPowerOfTwo(lit + 1)) {
+#ifndef NDEBUG
powerOfTwoMinusOne = true;
+#endif
} else {
return false;
}
@@ -2781,16 +2793,16 @@ static bool handleFmt23x(CompilationUnit *cUnit, MIR *mir)
* chaining cell for case default [8 bytes]
* noChain exit
*/
-static u8 findPackedSwitchIndex(const u2* switchData, int testVal, uintptr_t pc)
+static s8 findPackedSwitchIndex(const u2* switchData, int testVal, int pc)
{
int size;
int firstKey;
const int *entries;
int index;
int jumpIndex;
- uintptr_t caseDPCOffset = 0;
+ int caseDPCOffset = 0;
/* In Thumb mode pc is 4 ahead of the "mov r2, pc" instruction */
- uintptr_t chainingPC = (pc + 4) & ~3;
+ int chainingPC = (pc + 4) & ~3;
/*
* Packed switch data format:
@@ -2829,16 +2841,16 @@ static u8 findPackedSwitchIndex(const u2* switchData, int testVal, uintptr_t pc)
}
chainingPC += jumpIndex * CHAIN_CELL_NORMAL_SIZE;
- return (((u8) caseDPCOffset) << 32) | (u8) chainingPC;
+ return (((s8) caseDPCOffset) << 32) | (u8) chainingPC;
}
/* See comments for findPackedSwitchIndex */
-static u8 findSparseSwitchIndex(const u2* switchData, int testVal, uintptr_t pc)
+static s8 findSparseSwitchIndex(const u2* switchData, int testVal, int pc)
{
int size;
const int *keys;
const int *entries;
- uintptr_t chainingPC = (pc + 4) & ~3;
+ int chainingPC = (pc + 4) & ~3;
int i;
/*
@@ -2880,7 +2892,7 @@ static u8 findSparseSwitchIndex(const u2* switchData, int testVal, uintptr_t pc)
int jumpIndex = (i < MAX_CHAINED_SWITCH_CASES) ?
i : MAX_CHAINED_SWITCH_CASES + 1;
chainingPC += jumpIndex * CHAIN_CELL_NORMAL_SIZE;
- return (((u8) entries[i]) << 32) | (u8) chainingPC;
+ return (((s8) entries[i]) << 32) | (u8) chainingPC;
} else if (k > testVal) {
break;
}
@@ -3637,30 +3649,40 @@ static bool handleExecuteInline(CompilationUnit *cUnit, MIR *mir)
return false; /* Nop */
/* These ones we potentially JIT inline. */
+
+ case INLINE_STRING_CHARAT:
+ return genInlinedStringCharAt(cUnit, mir);
case INLINE_STRING_LENGTH:
return genInlinedStringLength(cUnit, mir);
case INLINE_STRING_IS_EMPTY:
return genInlinedStringIsEmpty(cUnit, mir);
+ case INLINE_STRING_COMPARETO:
+ return genInlinedCompareTo(cUnit, mir);
+ case INLINE_STRING_FASTINDEXOF_II:
+ return genInlinedFastIndexOf(cUnit, mir);
+
case INLINE_MATH_ABS_INT:
+ case INLINE_STRICT_MATH_ABS_INT:
return genInlinedAbsInt(cUnit, mir);
case INLINE_MATH_ABS_LONG:
+ case INLINE_STRICT_MATH_ABS_LONG:
return genInlinedAbsLong(cUnit, mir);
case INLINE_MATH_MIN_INT:
+ case INLINE_STRICT_MATH_MIN_INT:
return genInlinedMinMaxInt(cUnit, mir, true);
case INLINE_MATH_MAX_INT:
+ case INLINE_STRICT_MATH_MAX_INT:
return genInlinedMinMaxInt(cUnit, mir, false);
- case INLINE_STRING_CHARAT:
- return genInlinedStringCharAt(cUnit, mir);
case INLINE_MATH_SQRT:
+ case INLINE_STRICT_MATH_SQRT:
return genInlineSqrt(cUnit, mir);
case INLINE_MATH_ABS_FLOAT:
+ case INLINE_STRICT_MATH_ABS_FLOAT:
return genInlinedAbsFloat(cUnit, mir);
case INLINE_MATH_ABS_DOUBLE:
+ case INLINE_STRICT_MATH_ABS_DOUBLE:
return genInlinedAbsDouble(cUnit, mir);
- case INLINE_STRING_COMPARETO:
- return genInlinedCompareTo(cUnit, mir);
- case INLINE_STRING_FASTINDEXOF_II:
- return genInlinedFastIndexOf(cUnit, mir);
+
case INLINE_FLOAT_TO_RAW_INT_BITS:
case INLINE_INT_BITS_TO_FLOAT:
return genInlinedIntFloatConversion(cUnit, mir);
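The new checkZero path tests the 64-bit divisor, held as two 32-bit halves in r2/r3, by OR-ing the halves and trapping if the result is zero. The same predicate in Java terms (illustration only):

    class LongZeroCheck {
        // Mirrors the generated "or tReg, r2, r3; throw if tReg == 0" sequence:
        // a 64-bit value is zero iff both of its 32-bit halves are zero.
        static boolean divisorIsZero(int lo, int hi) {
            return (lo | hi) == 0;
        }
    }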
diff --git a/vm/compiler/codegen/arm/FP/ThumbVFP.cpp b/vm/compiler/codegen/arm/FP/ThumbVFP.cpp
index f685f2469..948198797 100644
--- a/vm/compiler/codegen/arm/FP/ThumbVFP.cpp
+++ b/vm/compiler/codegen/arm/FP/ThumbVFP.cpp
@@ -228,16 +228,13 @@ static bool genCmpFP(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
{
TemplateOpcode templateOpcode;
RegLocation rlResult = dvmCompilerGetReturn(cUnit);
- bool wide = true;
switch(mir->dalvikInsn.opcode) {
case OP_CMPL_FLOAT:
templateOpcode = TEMPLATE_CMPL_FLOAT_VFP;
- wide = false;
break;
case OP_CMPG_FLOAT:
templateOpcode = TEMPLATE_CMPG_FLOAT_VFP;
- wide = false;
break;
case OP_CMPL_DOUBLE:
templateOpcode = TEMPLATE_CMPL_DOUBLE_VFP;
diff --git a/vm/compiler/codegen/arm/GlobalOptimizations.cpp b/vm/compiler/codegen/arm/GlobalOptimizations.cpp
index e52bd8a49..45a063105 100644
--- a/vm/compiler/codegen/arm/GlobalOptimizations.cpp
+++ b/vm/compiler/codegen/arm/GlobalOptimizations.cpp
@@ -52,7 +52,7 @@ static void applyRedundantBranchElimination(CompilationUnit *cUnit)
* instruction.
*/
if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR = (ArmLIR *) cUnit->lastLIRInsn))
+ (nextLIR == (ArmLIR *) cUnit->lastLIRInsn))
break;
}
}
diff --git a/vm/compiler/codegen/arm/armv6-vfp/ArchVariant.cpp b/vm/compiler/codegen/arm/armv6-vfp/ArchVariant.cpp
new file mode 100644
index 000000000..3b5c08332
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6-vfp/ArchVariant.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern "C" void dvmCompilerTemplateStart(void);
+
+/*
+ * This file is included by armv6-vfp/Codegen.cpp, and implements architecture
+ * variant-specific code.
+ */
+
+/*
+ * Determine the initial instruction set to be used for this trace.
+ * Later components may decide to change this.
+ */
+JitInstructionSetType dvmCompilerInstructionSet(void)
+{
+ return DALVIK_JIT_THUMB;
+}
+
+/* First, declare dvmCompiler_TEMPLATE_XXX for each template */
+#define JIT_TEMPLATE(X) extern "C" void dvmCompiler_TEMPLATE_##X();
+#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#undef JIT_TEMPLATE
+
+/* Architecture-specific initializations and checks go here */
+bool dvmCompilerArchVariantInit(void)
+{
+ int i = 0;
+
+ /*
+ * Then, populate the templateEntryOffsets array with the offsets from
+ * the dvmCompilerTemplateStart symbol for each template.
+ */
+#define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \
+ (intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart;
+#include "../../../template/armv5te-vfp/TemplateOpList.h"
+#undef JIT_TEMPLATE
+
+ /* Target-specific configuration */
+ gDvmJit.jitTableSize = 1 << 9; // 512
+ gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
+ gDvmJit.threshold = 200;
+ gDvmJit.codeCacheSize = 512*1024;
+
+#if defined(WITH_SELF_VERIFICATION)
+ /* Force into blocking mode */
+ gDvmJit.blockingMode = true;
+ gDvm.nativeDebuggerActive = true;
+#endif
+
+ /* Codegen-specific assumptions */
+ assert(OFFSETOF_MEMBER(ClassObject, vtable) < 128 &&
+ (OFFSETOF_MEMBER(ClassObject, vtable) & 0x3) == 0);
+ assert(OFFSETOF_MEMBER(ArrayObject, length) < 128 &&
+ (OFFSETOF_MEMBER(ArrayObject, length) & 0x3) == 0);
+ assert(OFFSETOF_MEMBER(ArrayObject, contents) < 256);
+
+ /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
+ assert(sizeof(StackSaveArea) < 236);
+
+ /*
+ * EA is calculated by doing "Rn + imm5 << 2". Make sure that the last
+ * offset from the struct is less than 128.
+ */
+ if ((offsetof(Thread, jitToInterpEntries) +
+ sizeof(struct JitToInterpEntries)) >= 128) {
+ ALOGE("Thread.jitToInterpEntries size overflow");
+ dvmAbort();
+ }
+
+ /* No method JIT for Thumb backend */
+ gDvmJit.disableOpt |= (1 << kMethodJit);
+
+ // Make sure all threads have current values
+ dvmJitUpdateThreadStateAll();
+
+ return true;
+}
+
+int dvmCompilerTargetOptHint(int key)
+{
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ ALOGE("Unknown target optimization hint key: %d",key);
+ res = 0;
+ }
+ return res;
+}
+
+void dvmCompilerGenMemBarrier(CompilationUnit *cUnit, int barrierKind)
+{
+#if ANDROID_SMP != 0
+#error armv5+smp not supported
+#endif
+}
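
For reference, the JIT_TEMPLATE X-macro above is expanded once per entry of TemplateOpList.h; a sketch of the expansion for the first two entries (CMP_LONG and RETURN, as listed in the armv6-vfp TemplateOpList.h added later in this patch):

/* From the declaration pass: */
extern "C" void dvmCompiler_TEMPLATE_CMP_LONG();
extern "C" void dvmCompiler_TEMPLATE_RETURN();

/* From the pass inside dvmCompilerArchVariantInit(): */
templateEntryOffsets[i++] =
    (intptr_t) dvmCompiler_TEMPLATE_CMP_LONG - (intptr_t) dvmCompilerTemplateStart;
templateEntryOffsets[i++] =
    (intptr_t) dvmCompiler_TEMPLATE_RETURN - (intptr_t) dvmCompilerTemplateStart;
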
diff --git a/vm/compiler/codegen/arm/armv6-vfp/ArchVariant.h b/vm/compiler/codegen/arm/armv6-vfp/ArchVariant.h
new file mode 100644
index 000000000..727c5215b
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6-vfp/ArchVariant.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DALVIK_VM_COMPILER_CODEGEN_ARM_ARMV5TE_VFP_ARCHVARIANT_H_
+#define DALVIK_VM_COMPILER_CODEGEN_ARM_ARMV5TE_VFP_ARCHVARIANT_H_
+
+/* Create the TemplateOpcode enum */
+#define JIT_TEMPLATE(X) TEMPLATE_##X,
+enum TemplateOpcode {
+#include "../../../template/armv5te-vfp/TemplateOpList.h"
+/*
+ * For example,
+ * TEMPLATE_CMP_LONG,
+ * TEMPLATE_RETURN,
+ * ...
+ */
+ TEMPLATE_LAST_MARK,
+};
+#undef JIT_TEMPLATE
+
+#endif // DALVIK_VM_COMPILER_CODEGEN_ARM_ARMV5TE_VFP_ARCHVARIANT_H_
diff --git a/vm/compiler/codegen/arm/armv6-vfp/CallingConvention.S b/vm/compiler/codegen/arm/armv6-vfp/CallingConvention.S
new file mode 100644
index 000000000..4f1239564
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6-vfp/CallingConvention.S
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Save & restore for callee-save FP registers.
+ * On entry:
+ * r0 : pointer to save area of JIT_CALLEE_SAVE_WORD_SIZE
+ */
+ .text
+ .align 2
+ .global dvmJitCalleeSave
+ .type dvmJitCalleeSave, %function
+dvmJitCalleeSave:
+ vstmia r0, {d8-d15}
+ bx lr
+
+ .global dvmJitCalleeRestore
+ .type dvmJitCalleeRestore, %function
+dvmJitCalleeRestore:
+ vldmia r0, {d8-d15}
+ bx lr
diff --git a/vm/compiler/codegen/arm/armv6-vfp/Codegen.cpp b/vm/compiler/codegen/arm/armv6-vfp/Codegen.cpp
new file mode 100644
index 000000000..55321bb9c
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6-vfp/Codegen.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define _CODEGEN_C
+#define _ARMV5TE_VFP
+#define TGT_LIR ArmLIR
+
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "libdex/DexOpcodes.h"
+#include "compiler/CompilerInternals.h"
+#include "compiler/codegen/arm/ArmLIR.h"
+#include "mterp/common/FindInterface.h"
+#include "compiler/codegen/Ralloc.h"
+#include "compiler/codegen/arm/Codegen.h"
+#include "compiler/Loop.h"
+#include "ArchVariant.h"
+
+/* Arm codegen building blocks */
+#include "../CodegenCommon.cpp"
+
+/* Thumb-specific factory utilities */
+#include "../Thumb/Factory.cpp"
+/* Target independent factory utilities */
+#include "../../CodegenFactory.cpp"
+/* Arm-specific factory utilities */
+#include "../ArchFactory.cpp"
+
+/* Thumb-specific codegen routines */
+#include "../Thumb/Gen.cpp"
+/* Thumb+VFP codegen routines */
+#include "../FP/ThumbVFP.cpp"
+
+/* Thumb-specific register allocation */
+#include "../Thumb/Ralloc.cpp"
+
+/* MIR2LIR dispatcher and architecture-independent codegen routines */
+#include "../CodegenDriver.cpp"
+
+/* Dummy driver for method-based JIT */
+#include "../armv5te/MethodCodegenDriver.cpp"
+
+/* Architecture manifest */
+#include "ArchVariant.cpp"
diff --git a/vm/compiler/codegen/arm/armv6j/ArchVariant.cpp b/vm/compiler/codegen/arm/armv6j/ArchVariant.cpp
new file mode 100644
index 000000000..f2d4815ac
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6j/ArchVariant.cpp
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+extern "C" void dvmCompilerTemplateStart(void);
+
+/*
+ * This file is included by armv6j/Codegen.cpp, and implements architecture
+ * variant-specific code.
+ */
+
+/*
+ * Determine the initial instruction set to be used for this trace.
+ * Later components may decide to change this.
+ */
+JitInstructionSetType dvmCompilerInstructionSet(void)
+{
+ return DALVIK_JIT_THUMB;
+}
+
+/* First, declare dvmCompiler_TEMPLATE_XXX for each template */
+#define JIT_TEMPLATE(X) extern "C" void dvmCompiler_TEMPLATE_##X();
+#include "../../../template/armv5te/TemplateOpList.h"
+#undef JIT_TEMPLATE
+
+/* Architecture-specific initializations and checks go here */
+bool dvmCompilerArchVariantInit(void)
+{
+ int i = 0;
+
+ /*
+ * Then, populate the templateEntryOffsets array with the offsets from
+ * the dvmCompilerTemplateStart symbol for each template.
+ */
+#define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \
+ (intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart;
+#include "../../../template/armv5te/TemplateOpList.h"
+#undef JIT_TEMPLATE
+
+ /* Target-specific configuration */
+ gDvmJit.jitTableSize = 1 << 9; // 512
+ gDvmJit.jitTableMask = gDvmJit.jitTableSize - 1;
+ gDvmJit.threshold = 200;
+ gDvmJit.codeCacheSize = 512*1024;
+
+#if defined(WITH_SELF_VERIFICATION)
+ /* Force into blocking mode */
+ gDvmJit.blockingMode = true;
+ gDvm.nativeDebuggerActive = true;
+#endif
+
+ /* Codegen-specific assumptions */
+ assert(OFFSETOF_MEMBER(ClassObject, vtable) < 128 &&
+ (OFFSETOF_MEMBER(ClassObject, vtable) & 0x3) == 0);
+ assert(OFFSETOF_MEMBER(ArrayObject, length) < 128 &&
+ (OFFSETOF_MEMBER(ArrayObject, length) & 0x3) == 0);
+ assert(OFFSETOF_MEMBER(ArrayObject, contents) < 256);
+
+ /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */
+ assert(sizeof(StackSaveArea) < 236);
+
+ /*
+ * EA is calculated by doing "Rn + imm5 << 2". Make sure that the last
+ * offset from the struct is less than 128.
+ */
+ if ((offsetof(Thread, jitToInterpEntries) +
+ sizeof(struct JitToInterpEntries)) >= 128) {
+ ALOGE("Thread.jitToInterpEntries size overflow");
+ dvmAbort();
+ }
+
+ /* No method JIT for Thumb backend */
+ gDvmJit.disableOpt |= (1 << kMethodJit);
+
+ // Make sure all threads have current values
+ dvmJitUpdateThreadStateAll();
+
+ return true;
+}
+
+int dvmCompilerTargetOptHint(int key)
+{
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ ALOGE("Unknown target optimization hint key: %d",key);
+ res = 0;
+ }
+ return res;
+}
+
+void dvmCompilerGenMemBarrier(CompilationUnit *cUnit, int barrierKind)
+{
+#if ANDROID_SMP != 0
+#error armv5+smp not supported
+#endif
+}
diff --git a/vm/compiler/codegen/arm/armv6j/ArchVariant.h b/vm/compiler/codegen/arm/armv6j/ArchVariant.h
new file mode 100644
index 000000000..39a95483e
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6j/ArchVariant.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DALVIK_VM_COMPILER_CODEGEN_ARM_ARMV5TE_ARCHVARIANT_H_
+#define DALVIK_VM_COMPILER_CODEGEN_ARM_ARMV5TE_ARCHVARIANT_H_
+
+/* Create the TemplateOpcode enum */
+#define JIT_TEMPLATE(X) TEMPLATE_##X,
+enum TemplateOpcode {
+#include "../../../template/armv5te/TemplateOpList.h"
+/*
+ * For example,
+ * TEMPLATE_CMP_LONG,
+ * TEMPLATE_RETURN,
+ * ...
+ */
+ TEMPLATE_LAST_MARK,
+};
+#undef JIT_TEMPLATE
+
+#endif // DALVIK_VM_COMPILER_CODEGEN_ARM_ARMV5TE_ARCHVARIANT_H_
diff --git a/vm/compiler/codegen/arm/armv6j/CallingConvention.S b/vm/compiler/codegen/arm/armv6j/CallingConvention.S
new file mode 100644
index 000000000..0cbc64fa6
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6j/CallingConvention.S
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Save & restore for callee-save FP registers.
+ * On entry:
+ * r0 : pointer to save area of JIT_CALLEE_SAVE_WORD_SIZE
+ */
+ .text
+ .align 2
+ .global dvmJitCalleeSave
+ .type dvmJitCalleeSave, %function
+dvmJitCalleeSave:
+ bx lr
+
+ .global dvmJitCalleeRestore
+ .type dvmJitCalleeRestore, %function
+dvmJitCalleeRestore:
+ bx lr
diff --git a/vm/compiler/codegen/arm/armv6j/Codegen.cpp b/vm/compiler/codegen/arm/armv6j/Codegen.cpp
new file mode 100644
index 000000000..2c17536cc
--- /dev/null
+++ b/vm/compiler/codegen/arm/armv6j/Codegen.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define _CODEGEN_C
+#define _ARMV5TE
+#define TGT_LIR ArmLIR
+
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "libdex/DexOpcodes.h"
+#include "compiler/CompilerInternals.h"
+#include "compiler/codegen/arm/ArmLIR.h"
+#include "mterp/common/FindInterface.h"
+#include "compiler/codegen/Ralloc.h"
+#include "compiler/codegen/arm/Codegen.h"
+#include "compiler/Loop.h"
+#include "ArchVariant.h"
+
+/* Arm codegen building blocks */
+#include "../CodegenCommon.cpp"
+
+/* Thumb-specific building blocks */
+#include "../Thumb/Factory.cpp"
+/* Target independent factory utilities */
+#include "../../CodegenFactory.cpp"
+/* Arm-specific factory utilities */
+#include "../ArchFactory.cpp"
+
+/* Thumb-specific codegen routines */
+#include "../Thumb/Gen.cpp"
+/* Thumb+Portable FP codegen routines */
+#include "../FP/ThumbPortableFP.cpp"
+
+/* Thumb-specific register allocation */
+#include "../Thumb/Ralloc.cpp"
+
+/* MIR2LIR dispatcher and architecture-independent codegen routines */
+#include "../CodegenDriver.cpp"
+
+/* Dummy driver for method-based JIT */
+#include "../armv5te/MethodCodegenDriver.cpp"
+
+/* Architecture manifest */
+#include "ArchVariant.cpp"
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
index 4fd5a71d7..23614e913 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_DOUBLE.S
@@ -16,8 +16,8 @@
/* op vAA, vBB, vCC */
push {r0-r3} @ save operands
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
bhi .L${opcode}_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r1<- -1
moveq r0, #0 @ (equal) r1<- 0, trumps less than
@@ -30,8 +30,8 @@
.L${opcode}_gt_or_nan:
pop {r2-r3} @ restore operands in reverse order
pop {r0-r1} @ restore operands in reverse order
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
$naninst @ r1<- 1 or -1 for NaN
diff --git a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
index d0f2bec93..f9293e6d3 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_CMPL_FLOAT.S
@@ -36,8 +36,8 @@
mov r9, r0 @ Save copies - we may need to redo
mov r10, r1
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
bhi .L${opcode}_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0, trumps less than
@@ -48,8 +48,8 @@
.L${opcode}_gt_or_nan:
mov r0, r10 @ restore in reverse order
mov r1, r9
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
$naninst @ r1<- 1 or -1 for NaN
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
index 03b97a479..99a17abdc 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -41,8 +41,8 @@ $chaintgt:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
index 2a73c22d5..d8661d983 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -44,8 +44,8 @@
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -54,8 +54,8 @@
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
diff --git a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
index a7a09614a..b7015eb5f 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -48,8 +48,8 @@
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -57,4 +57,4 @@
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
diff --git a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
index d074c9eaa..b10afcf3f 100644
--- a/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
+++ b/vm/compiler/template/armv5te/TEMPLATE_RETURN.S
@@ -9,8 +9,8 @@
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
diff --git a/vm/compiler/template/armv5te/footer.S b/vm/compiler/template/armv5te/footer.S
index 16660ae3b..001b80b2f 100644
--- a/vm/compiler/template/armv5te/footer.S
+++ b/vm/compiler/template/armv5te/footer.S
@@ -29,20 +29,20 @@
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -68,7 +68,7 @@
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -85,7 +85,7 @@
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_ADD_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_ADD_DOUBLE_VFP.S
new file mode 100644
index 000000000..51693fa0e
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_ADD_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinopWide.S" {"instr":"faddd d2, d0, d1"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_ADD_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_ADD_FLOAT_VFP.S
new file mode 100644
index 000000000..ad1e12211
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_ADD_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinop.S" {"instr":"fadds s2, s0, s1"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_DOUBLE_VFP.S
new file mode 100644
index 000000000..992c8948e
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_DOUBLE_VFP.S
@@ -0,0 +1,33 @@
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ *
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ fldd d0, [r0] @ d0<- vBB
+ fldd d1, [r1] @ d1<- vCC
+ fcmpd d0, d1 @ compare (vBB, vCC)
+ mov r0, #1 @ r0<- 1 (default)
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_FLOAT_VFP.S
new file mode 100644
index 000000000..0510ef69d
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPG_FLOAT_VFP.S
@@ -0,0 +1,32 @@
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ flds s0, [r0] @ s0<- vBB
+ flds s1, [r1] @ s1<- vCC
+ fcmps s0, s1 @ compare (vBB, vCC)
+ mov r0, #1 @ r0<- 1 (default)
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_DOUBLE_VFP.S
new file mode 100644
index 000000000..7241af14e
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_DOUBLE_VFP.S
@@ -0,0 +1,32 @@
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ fldd d0, [r0] @ d0<- vBB
+ fldd d1, [r1] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ mvn r0, #0 @ r0<- -1 (default)
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_FLOAT_VFP.S
new file mode 100644
index 000000000..bdb42d60f
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_CMPL_FLOAT_VFP.S
@@ -0,0 +1,32 @@
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ flds s0, [r0] @ s0<- vBB
+ flds s1, [r1] @ s1<- vCC
+ fcmps s0, s1 @ compare (vBB, vCC)
+ mvn r0, #0 @ r0<- -1 (default)
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
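
The four VFP compare templates above differ only in how an unordered (NaN) comparison is resolved; a plain C sketch of the intended semantics, matching the pseudocode quoted in their headers:

/* cmpg-* biases NaN toward +1; cmpl-* biases NaN toward -1. */
static int cmpgFloat(float x, float y)
{
    if (x == y) return 0;
    if (x < y)  return -1;
    return 1;               /* x > y, or either operand is NaN */
}

static int cmplFloat(float x, float y)
{
    if (x == y) return 0;
    if (x > y)  return 1;
    return -1;              /* x < y, or either operand is NaN */
}
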
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_DIV_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_DIV_DOUBLE_VFP.S
new file mode 100644
index 000000000..8fa58b86a
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_DIV_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinopWide.S" {"instr":"fdivd d2, d0, d1"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_DIV_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_DIV_FLOAT_VFP.S
new file mode 100644
index 000000000..fc125ce6a
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_DIV_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinop.S" {"instr":"fdivs s2, s0, s1"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S
new file mode 100644
index 000000000..dba3b082f
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/funopNarrower.S" {"instr":"fcvtsd s0, d0"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_INT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_INT_VFP.S
new file mode 100644
index 000000000..4d910aadd
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_DOUBLE_TO_INT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/funopNarrower.S" {"instr":"ftosizd s0, d0"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S
new file mode 100644
index 000000000..a5157dd8b
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/funopWider.S" {"instr":"fcvtds d0, s0"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_INT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_INT_VFP.S
new file mode 100644
index 000000000..90900aa1b
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_FLOAT_TO_INT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/funop.S" {"instr":"ftosizs s1, s0"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_DOUBLE_VFP.S
new file mode 100644
index 000000000..c9f4fd688
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/funopWider.S" {"instr":"fsitod d0, s0"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_FLOAT_VFP.S
new file mode 100644
index 000000000..a8f57b505
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_INT_TO_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/funop.S" {"instr":"fsitos s1, s0"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_MEM_OP_DECODE.S b/vm/compiler/template/armv6-vfp/TEMPLATE_MEM_OP_DECODE.S
new file mode 100644
index 000000000..8bee85344
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_MEM_OP_DECODE.S
@@ -0,0 +1,19 @@
+#if defined(WITH_SELF_VERIFICATION)
+ /*
+ * This handler encapsulates heap memory ops for selfVerification mode.
+ *
+ * The call to the handler is inserted prior to a heap memory operation.
+ * This handler then calls a function to decode the memory op, and process
+ * it accordingly. Afterwards, the handler changes the return address to
+ * skip the memory op so it never gets executed.
+ */
+ vpush {d0-d15} @ save out all fp registers
+ push {r0-r12,lr} @ save out all registers
+ ldr r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
+ mov r0, lr @ arg0 <- link register
+ mov r1, sp @ arg1 <- stack pointer
+ blx r2 @ decode and handle the mem op
+ pop {r0-r12,lr} @ restore all registers
+ vpop {d0-d15} @ restore all fp registers
+ bx lr @ return to compiled code
+#endif
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_MUL_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_MUL_DOUBLE_VFP.S
new file mode 100644
index 000000000..459e7960e
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_MUL_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinopWide.S" {"instr":"fmuld d2, d0, d1"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_MUL_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_MUL_FLOAT_VFP.S
new file mode 100644
index 000000000..301fa8436
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_MUL_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinop.S" {"instr":"fmuls s2, s0, s1"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_RESTORE_STATE.S b/vm/compiler/template/armv6-vfp/TEMPLATE_RESTORE_STATE.S
new file mode 100644
index 000000000..196d08281
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_RESTORE_STATE.S
@@ -0,0 +1,11 @@
+ /*
+ * This handler restores state following a selfVerification memory access.
+ * On entry:
+ * r0 - offset from rSELF to the 1st element of the coreRegs save array.
+ */
+ add r0, r0, rSELF @ pointer to heapArgSpace.coreRegs[0]
+ add r0, #64 @ pointer to heapArgSpace.fpRegs[0]
+ vldmia r0, {d0-d15}
+ sub r0, #64 @ pointer to heapArgSpace.coreRegs[0]
+ ldmia r0, {r0-r12}
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_SAVE_STATE.S b/vm/compiler/template/armv6-vfp/TEMPLATE_SAVE_STATE.S
new file mode 100644
index 000000000..11f62b7f5
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_SAVE_STATE.S
@@ -0,0 +1,23 @@
+ /*
+ * This handler performs a register save for selfVerification mode.
+ * On entry:
+ * Top of stack + 4: r7 value to save
+ * Top of stack + 0: r0 value to save
+ * r0 - offset from rSELF to the beginning of the heapArgSpace record
+ * r7 - the value of regMap
+ *
+ * The handler must save regMap, r0-r12 and then return with r0-r12
+ * with their original values (note that this means r0 and r7 must take
+ * the values on the stack - not the ones in those registers on entry).
+ * Finally, the two registers previously pushed must be popped.
+ */
+ add r0, r0, rSELF @ pointer to heapArgSpace
+ stmia r0!, {r7} @ save regMap
+ ldr r7, [r13, #0] @ recover r0 value
+ stmia r0!, {r7} @ save r0
+ ldr r7, [r13, #4] @ recover r7 value
+ stmia r0!, {r1-r12}
+ add r0, #12 @ move to start of FP save region
+ vstmia r0, {d0-d15}
+ pop {r0, r7} @ recover r0, r7
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_SQRT_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_SQRT_DOUBLE_VFP.S
new file mode 100644
index 000000000..1c6bb467b
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_SQRT_DOUBLE_VFP.S
@@ -0,0 +1,23 @@
+%verify "executed"
+ /*
+ * 64-bit floating point vfp sqrt operation.
+ * If the result is a NaN, bail out to library code to do
+ * the right thing.
+ *
+ * On entry:
+ * r2 src addr of op1
+ * On exit:
+ * r0,r1 = res
+ */
+ fldd d0, [r2]
+ fsqrtd d1, d0
+ fcmpd d1, d1
+ fmstat
+ fmrrd r0, r1, d1
+ bxeq lr @ Result OK - return
+ ldr r2, .Lsqrt
+ fmrrd r0, r1, d0 @ reload orig operand
+ bx r2 @ tail call to sqrt library routine
+
+.Lsqrt:
+ .word sqrt
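
A C equivalent of the fast path above, assuming the only reason to fall back to the library routine is a NaN result from the hardware instruction (the function name is illustrative):

#include <math.h>

double jitSqrtDouble(double x)
{
    double r = __builtin_sqrt(x);   /* stands in for fsqrtd d1, d0 */
    if (r != r)                     /* NaN never compares equal to itself */
        return sqrt(x);             /* redo via libm for the exact bits */
    return r;
}
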
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_SUB_DOUBLE_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_SUB_DOUBLE_VFP.S
new file mode 100644
index 000000000..8fa20a028
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_SUB_DOUBLE_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinopWide.S" {"instr":"fsubd d2, d0, d1"}
diff --git a/vm/compiler/template/armv6-vfp/TEMPLATE_SUB_FLOAT_VFP.S b/vm/compiler/template/armv6-vfp/TEMPLATE_SUB_FLOAT_VFP.S
new file mode 100644
index 000000000..5e17e514c
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TEMPLATE_SUB_FLOAT_VFP.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "armv5te-vfp/fbinop.S" {"instr":"fsubs s2, s0, s1"}
diff --git a/vm/compiler/template/armv6-vfp/TemplateOpList.h b/vm/compiler/template/armv6-vfp/TemplateOpList.h
new file mode 100644
index 000000000..0365ba4d2
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/TemplateOpList.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Dalvik opcode list that uses additional templates to complete JIT execution.
+ */
+#ifndef JIT_TEMPLATE
+#define JIT_TEMPLATE(X)
+#endif
+
+JIT_TEMPLATE(CMP_LONG)
+JIT_TEMPLATE(RETURN)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE)
+JIT_TEMPLATE(MUL_LONG)
+JIT_TEMPLATE(SHL_LONG)
+JIT_TEMPLATE(SHR_LONG)
+JIT_TEMPLATE(USHR_LONG)
+JIT_TEMPLATE(ADD_FLOAT_VFP)
+JIT_TEMPLATE(SUB_FLOAT_VFP)
+JIT_TEMPLATE(MUL_FLOAT_VFP)
+JIT_TEMPLATE(DIV_FLOAT_VFP)
+JIT_TEMPLATE(ADD_DOUBLE_VFP)
+JIT_TEMPLATE(SUB_DOUBLE_VFP)
+JIT_TEMPLATE(MUL_DOUBLE_VFP)
+JIT_TEMPLATE(DIV_DOUBLE_VFP)
+JIT_TEMPLATE(DOUBLE_TO_FLOAT_VFP)
+JIT_TEMPLATE(DOUBLE_TO_INT_VFP)
+JIT_TEMPLATE(FLOAT_TO_DOUBLE_VFP)
+JIT_TEMPLATE(FLOAT_TO_INT_VFP)
+JIT_TEMPLATE(INT_TO_DOUBLE_VFP)
+JIT_TEMPLATE(INT_TO_FLOAT_VFP)
+JIT_TEMPLATE(CMPG_DOUBLE_VFP)
+JIT_TEMPLATE(CMPL_DOUBLE_VFP)
+JIT_TEMPLATE(CMPG_FLOAT_VFP)
+JIT_TEMPLATE(CMPL_FLOAT_VFP)
+JIT_TEMPLATE(SQRT_DOUBLE_VFP)
+JIT_TEMPLATE(THROW_EXCEPTION_COMMON)
+JIT_TEMPLATE(MEM_OP_DECODE)
+JIT_TEMPLATE(STRING_COMPARETO)
+JIT_TEMPLATE(STRING_INDEXOF)
+JIT_TEMPLATE(INTERPRET)
+JIT_TEMPLATE(MONITOR_ENTER)
+JIT_TEMPLATE(MONITOR_ENTER_DEBUG)
+JIT_TEMPLATE(PERIODIC_PROFILING)
+JIT_TEMPLATE(RETURN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE_PROF)
diff --git a/vm/compiler/template/armv6-vfp/fbinop.S b/vm/compiler/template/armv6-vfp/fbinop.S
new file mode 100644
index 000000000..3bc4b52a9
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/fbinop.S
@@ -0,0 +1,14 @@
+ /*
+ * Generic 32-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ flds s0,[r1]
+ flds s1,[r2]
+ $instr
+ fsts s2,[r0]
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/fbinopWide.S b/vm/compiler/template/armv6-vfp/fbinopWide.S
new file mode 100644
index 000000000..3774646bf
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/fbinopWide.S
@@ -0,0 +1,14 @@
+ /*
+ * Generic 64-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs d2 = d0 op d1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ fldd d0,[r1]
+ fldd d1,[r2]
+ $instr
+ fstd d2,[r0]
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/funop.S b/vm/compiler/template/armv6-vfp/funop.S
new file mode 100644
index 000000000..8409c287c
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/funop.S
@@ -0,0 +1,15 @@
+ /*
+ * Generic 32bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s1 = op s0".
+ *
+ * For: float-to-int, int-to-float
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ flds s0, [r1] @ s0<- vB
+ $instr @ s1<- op s0
+ fsts s1, [r0] @ vA<- s1
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/funopNarrower.S b/vm/compiler/template/armv6-vfp/funopNarrower.S
new file mode 100644
index 000000000..8566fcaf2
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/funopNarrower.S
@@ -0,0 +1,15 @@
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ fldd d0, [r1] @ d0<- vB
+ $instr @ s0<- op d0
+ fsts s0, [r0] @ vA<- s0
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/funopWider.S b/vm/compiler/template/armv6-vfp/funopWider.S
new file mode 100644
index 000000000..dbe745c9b
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/funopWider.S
@@ -0,0 +1,15 @@
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ flds s0, [r1] @ s0<- vB
+ $instr @ d0<- op s0
+ fstd d0, [r0] @ vA<- d0
+ bx lr
diff --git a/vm/compiler/template/armv6-vfp/platform.S b/vm/compiler/template/armv6-vfp/platform.S
new file mode 100644
index 000000000..e0666a57a
--- /dev/null
+++ b/vm/compiler/template/armv6-vfp/platform.S
@@ -0,0 +1,5 @@
+/*
+ * ===========================================================================
+ * CPU-version-specific defines and utility
+ * ===========================================================================
+ */
diff --git a/vm/compiler/template/armv6j/TEMPLATE_CMPG_DOUBLE.S b/vm/compiler/template/armv6j/TEMPLATE_CMPG_DOUBLE.S
new file mode 100644
index 000000000..f18f6d3ae
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_CMPG_DOUBLE.S
@@ -0,0 +1 @@
+%include "armv5te/TEMPLATE_CMPL_DOUBLE.S" { "naninst":"mov r0, #1" }
diff --git a/vm/compiler/template/armv6j/TEMPLATE_CMPG_FLOAT.S b/vm/compiler/template/armv6j/TEMPLATE_CMPG_FLOAT.S
new file mode 100644
index 000000000..02887e57d
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_CMPG_FLOAT.S
@@ -0,0 +1 @@
+%include "armv5te/TEMPLATE_CMPL_FLOAT.S" { "naninst":"mov r0, #1" }
diff --git a/vm/compiler/template/armv6j/TEMPLATE_CMPL_DOUBLE.S b/vm/compiler/template/armv6j/TEMPLATE_CMPL_DOUBLE.S
new file mode 100644
index 000000000..23614e913
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_CMPL_DOUBLE.S
@@ -0,0 +1,38 @@
+%default { "naninst":"mvn r0, #0" }
+ /*
+ * For the JIT: incoming arguments in r0-r1, r2-r3
+ * result in r0
+ *
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ push {r0-r3} @ save operands
+ mov r11, lr @ save return address
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
+ bhi .L${opcode}_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r0, #0 @ (less than) r1<- -1
+ moveq r0, #0 @ (equal) r1<- 0, trumps less than
+ add sp, #16 @ drop unused operands
+ bx r11
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.L${opcode}_gt_or_nan:
+ pop {r2-r3} @ restore operands in reverse order
+ pop {r0-r1} @ restore operands in reverse order
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
+ movcc r0, #1 @ (greater than) r1<- 1
+ bxcc r11
+ $naninst @ r1<- 1 or -1 for NaN
+ bx r11
diff --git a/vm/compiler/template/armv6j/TEMPLATE_CMPL_FLOAT.S b/vm/compiler/template/armv6j/TEMPLATE_CMPL_FLOAT.S
new file mode 100644
index 000000000..f9293e6d3
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_CMPL_FLOAT.S
@@ -0,0 +1,56 @@
+%default { "naninst":"mvn r0, #0" }
+ /*
+ * For the JIT: incoming arguments in r0-r1, r2-r3
+ * result in r0
+ *
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ mov r9, r0 @ Save copies - we may need to redo
+ mov r10, r1
+ mov r11, lr @ save return address
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
+ bhi .L${opcode}_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0, trumps less than
+ bx r11
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.L${opcode}_gt_or_nan:
+ mov r0, r10 @ restore in reverse order
+ mov r1, r9
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
+ movcc r0, #1 @ (greater than) r1<- 1
+ bxcc r11
+ $naninst @ r1<- 1 or -1 for NaN
+ bx r11
diff --git a/vm/compiler/template/armv6j/TEMPLATE_CMP_LONG.S b/vm/compiler/template/armv6j/TEMPLATE_CMP_LONG.S
new file mode 100644
index 000000000..e5e8196cc
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_CMP_LONG.S
@@ -0,0 +1,33 @@
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .L${opcode}_less @ signed compare on high part
+ bgt .L${opcode}_greater
+ subs r0, r0, r2 @ r0<- r0 - r2
+ bxeq lr
+ bhi .L${opcode}_greater @ unsigned compare on low part
+.L${opcode}_less:
+ mvn r0, #0 @ r0<- -1
+ bx lr
+.L${opcode}_greater:
+ mov r0, #1 @ r0<- 1
+ bx lr
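
Reference semantics for the cmp-long template above, as a C sketch; note the signed compare on the high words followed by an unsigned compare on the low words, exactly as the assembly does it:

#include <stdint.h>

static int cmpLong(int64_t x, int64_t y)
{
    int32_t  xh = (int32_t) (x >> 32), yh = (int32_t) (y >> 32);
    uint32_t xl = (uint32_t) x,        yl = (uint32_t) y;

    if (xh < yh) return -1;     /* signed compare on the high words */
    if (xh > yh) return  1;
    if (xl < yl) return -1;     /* unsigned compare on the low words */
    if (xl > yl) return  1;
    return 0;
}
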
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INTERPRET.S b/vm/compiler/template/armv6j/TEMPLATE_INTERPRET.S
new file mode 100644
index 000000000..dafa6896f
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INTERPRET.S
@@ -0,0 +1,30 @@
+ /*
+ * This handler transfers control to the interpreter without performing
+ * any lookups. It may be called either as part of a normal chaining
+ * operation, or from the transition code in header.S. We distinguish
+ * the two cases by looking at the link register. If called from a
+ * translation chain, it will point to the chaining Dalvik PC + 1.
+ * On entry:
+ * lr - if NULL:
+ * r1 - the Dalvik PC to begin interpretation.
+ * else
+ * [lr, #-1] contains Dalvik PC to begin interpretation
+ * rSELF - pointer to thread
+ * rFP - Dalvik frame pointer
+ */
+ cmp lr, #0
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ beq 101f
+ ldr r1,[lr, #3]
+101:
+#else
+ ldrne r1,[lr, #-1]
+#endif
+ ldr r2, .LinterpPunt
+ mov r0, r1 @ set Dalvik PC
+ bx r2
+ @ doesn't return
+
+.LinterpPunt:
+ .word dvmJitToInterpPunt
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN.S
new file mode 100644
index 000000000..99a17abdc
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN.S
@@ -0,0 +1,49 @@
+%default { "chaintgt" : ".LinvokeChain" }
+ /*
+ * For monomorphic callsite, setup the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ */
+ @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+ @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+ @ methodToCall is guaranteed to be non-native
+$chaintgt:
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ add r12, lr, #2 @ setup the punt-to-interp address
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo r12 @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne r12 @ bail to the interpreter
+
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
+
+ bx lr @ return to the callee-chaining cell
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S
new file mode 100644
index 000000000..d1be4fdf7
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S" { "chaintgt" : ".LinvokeChainProf" }
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE.S
new file mode 100644
index 000000000..d8661d983
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE.S
@@ -0,0 +1,83 @@
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ @ r7 = methodToCall->registersSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+ bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
+
+ @ go ahead and transfer control to the native code
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ mov r2, #0
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ arg2<- methodToCall
+ mov r0, r1 @ arg0<- newFP
+ add r1, rSELF, #offThread_retval @ arg1<- &retval
+ mov r3, rSELF @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rSELF
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ blx r8 @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+#endif
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S
new file mode 100644
index 000000000..816277ac3
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT.S
new file mode 100644
index 000000000..b7015eb5f
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT.S
@@ -0,0 +1,60 @@
+ /*
+ * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+ * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+ * runtime-resolved callee.
+ */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ ldrh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldrh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ ldr r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+ ldr rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns
+
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne lr @ bail to the interpreter
+ tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+ bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
+
+ ldr r10, .LdvmJitToInterpTraceSelectNoChain
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kInlineCacheMiss
+#endif
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S
new file mode 100644
index 000000000..bfea7d915
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S
new file mode 100644
index 000000000..ef88106b3
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S
@@ -0,0 +1,58 @@
+%default { "chaintgt" : ".LinvokeChain" }
+ /*
+ * For a polymorphic callsite, check whether the cached class pointer matches
+ * the current one. If so, set up the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ *
+ * The predicted chaining cell is declared in ArmLIR.h with the
+ * following layout:
+ *
+ * typedef struct PredictedChainingCell {
+ * u4 branch;
+ * const ClassObject *clazz;
+ * const Method *method;
+ * u4 counter;
+ * } PredictedChainingCell;
+ *
+ * Upon returning to the callsite:
+ * - lr : to branch to the chaining cell
+ * - lr+2: to punt to the interpreter
+ * - lr+4: to fully resolve the callee (and possibly rechain).
+ * r3 <- class
+ * r9 <- counter
+ */
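+ /*
+ * Rough C sketch of the check below (illustrative only):
+ *   if (this->clazz == cell->clazz) {        // prediction hit
+ *       // load regsSize/outsSize and branch to the chaining cell
+ *   } else {                                 // prediction miss
+ *       cell->counter--;                     // rechain once this reaches 0
+ *       // return to lr+4 to fully resolve the callee
+ *   }
+ */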
+ @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+ ldr r3, [r0, #offObject_clazz] @ r3 <- this->class
+ ldr r8, [r2, #4] @ r8 <- predictedChainCell->clazz
+ ldr r0, [r2, #8] @ r0 <- predictedChainCell->method
+ ldr r9, [r2, #12] @ r9 <- predictedChainCell->counter
+ cmp r3, r8 @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+ ldr r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ bne 101f
+ ldr r10, [r7, #0]
+101:
+#else
+ ldreq r10, [r7, #0]
+#endif
+ add r10, r10, #1
+ streq r10, [r7, #0]
+#endif
+ ldreqh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldreqh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ beq $chaintgt @ predicted chain is valid
+ ldr r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+ sub r1, r9, #1 @ count--
+ str r1, [r2, #12] @ write back to PredictedChainingCell->counter
+ add lr, lr, #4 @ return to fully-resolve landing pad
+ /*
+ * r1 <- count
+ * r2 <- &predictedChainCell
+ * r3 <- this->class
+ * r4 <- dPC
+ * r7 <- this->class->vtable
+ */
+ bx lr
diff --git a/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S
new file mode 100644
index 000000000..6ca5bddb9
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S" { "chaintgt" : ".LinvokeChainProf" }
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv6j/TEMPLATE_MEM_OP_DECODE.S b/vm/compiler/template/armv6j/TEMPLATE_MEM_OP_DECODE.S
new file mode 100644
index 000000000..03926b69c
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_MEM_OP_DECODE.S
@@ -0,0 +1,17 @@
+#if defined(WITH_SELF_VERIFICATION)
+ /*
+ * This handler encapsulates heap memory ops for selfVerification mode.
+ *
+ * The call to the handler is inserted prior to a heap memory operation.
+ * This handler then calls a function to decode the memory op, and process
+ * it accordingly. Afterwards, the handler changes the return address to
+ * skip the memory op so it never gets executed.
+ */
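+ /*
+ * In effect (sketch): dvmSelfVerificationMemOpDecode(lr, &savedRegs);
+ * the decoder also advances the saved return address so the original
+ * memory op at the call site is skipped.
+ */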
+ push {r0-r12,lr} @ save out all registers
+ ldr r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
+ mov r0, lr @ arg0 <- link register
+ mov r1, sp @ arg1 <- stack pointer
+ blx r2 @ decode and handle the mem op
+ pop {r0-r12,lr} @ restore all registers
+ bx lr @ return to compiled code
+#endif
diff --git a/vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER.S b/vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER.S
new file mode 100644
index 000000000..1ed3fb1d3
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER.S
@@ -0,0 +1,21 @@
+ /*
+ * Call out to the runtime to lock an object. Because this thread
+ * may have been suspended in THREAD_MONITOR state and the Jit's
+ * translation cache subsequently cleared, we cannot return directly.
+ * Instead, unconditionally transition to the interpreter to resume.
+ *
+ * On entry:
+ * r0 - self pointer
+ * r1 - the object (which has already been null-checked by the caller)
+ * r4 - the Dalvik PC of the following instruction.
+ */
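+ /*
+ * Equivalent pseudocode (sketch):
+ *   self->inJitCodeCache = NULL;
+ *   dvmLockObject(self, obj);
+ *   goto dvmJitToInterpNoChain;    // rPC is still live in r4
+ */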
+ ldr r2, .LdvmLockObject
+ mov r3, #0 @ Record that we're not returning
+ str r3, [r0, #offThread_inJitCodeCache]
+ blx r2 @ dvmLockObject(self, obj)
+ ldr r2, .LdvmJitToInterpNoChain
+ @ Bail to interpreter - no chain [note - r4 still contains rPC]
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kHeavyweightMonitor
+#endif
+ bx r2
diff --git a/vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER_DEBUG.S b/vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER_DEBUG.S
new file mode 100644
index 000000000..26954838b
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_MONITOR_ENTER_DEBUG.S
@@ -0,0 +1,28 @@
+ /*
+ * To support deadlock prediction, this version of MONITOR_ENTER
+ * will always call the heavyweight dvmLockObject, check for an
+ * exception and then bail out to the interpreter.
+ *
+ * On entry:
+ * r0 - self pointer
+ * r1 - the object (which has already been null-checked by the caller)
+ * r4 - the Dalvik PC of the following instruction.
+ *
+ */
+ ldr r2, .LdvmLockObject
+ mov r3, #0 @ Record that we're not returning
+ str r3, [r0, #offThread_inJitCodeCache]
+ blx r2 @ dvmLockObject(self, obj)
+ @ test for exception
+ ldr r1, [rSELF, #offThread_exception]
+ cmp r1, #0
+ beq 1f
+ ldr r2, .LhandleException
+ sub r0, r4, #2 @ roll dPC back to this monitor instruction
+ bx r2
+1:
+ @ Bail to interpreter - no chain [note - r4 still contains rPC]
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kHeavyweightMonitor
+#endif
+ ldr pc, .LdvmJitToInterpNoChain
diff --git a/vm/compiler/template/armv6j/TEMPLATE_MUL_LONG.S b/vm/compiler/template/armv6j/TEMPLATE_MUL_LONG.S
new file mode 100644
index 000000000..8a9b11574
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_MUL_LONG.S
@@ -0,0 +1,28 @@
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * For JIT: op1 in r0/r1, op2 in r2/r3, return in r0/r1
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
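+ /*
+ * Worked example in C terms (sketch; op1 = W:X in r1:r0, op2 = Y:Z in r3:r2):
+ *   u8 lo  = (u8)X * Z;                  // ZxX as a full 64-bit product
+ *   u4 hi  = W * Z + X * Y;              // ZxW + YxX, modulo 2^32
+ *   result = lo + ((u8)hi << 32);
+ */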
+ /* mul-long vAA, vBB, vCC */
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ mov r0,r9
+ mov r1,r10
+ bx lr
diff --git a/vm/compiler/template/armv6j/TEMPLATE_PERIODIC_PROFILING.S b/vm/compiler/template/armv6j/TEMPLATE_PERIODIC_PROFILING.S
new file mode 100644
index 000000000..c0f7d6e3b
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_PERIODIC_PROFILING.S
@@ -0,0 +1,26 @@
+ /*
+ * Increment profile counter for this trace, and decrement
+ * sample counter. If sample counter goes below zero, turn
+ * off profiling.
+ *
+ * On entry
+ * (lr-11) is the address of a pointer to the counter. Note: the pointer
+ * word actually lives 10 bytes before the return target, but because
+ * we arrive from Thumb mode, lr will have its low bit set.
+ */
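+ /*
+ * Pseudocode sketch of the sequence below:
+ *   (*counterAddr)++;                    // bump this trace's profile count
+ *   if (--(*pProfileCountdown) < 0)      // sample budget exhausted
+ *       dvmJitTraceProfilingOff();
+ */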
+ ldr r0, [lr,#-11]
+ ldr r1, [rSELF, #offThread_pProfileCountdown]
+ ldr r2, [r0] @ get counter
+ ldr r3, [r1] @ get countdown timer
+ add r2, #1 @ increment profile counter
+ subs r3, #1 @ decrement countdown timer
+ blt .L${opcode}_disable_profiling
+ str r2, [r0]
+ str r3, [r1]
+ bx lr
+
+.L${opcode}_disable_profiling:
+ mov r4, lr @ preserve lr
+ ldr r0, .LdvmJitTraceProfilingOff
+ blx r0
+ bx r4
diff --git a/vm/compiler/template/armv6j/TEMPLATE_RESTORE_STATE.S b/vm/compiler/template/armv6j/TEMPLATE_RESTORE_STATE.S
new file mode 100644
index 000000000..25b4ffa82
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_RESTORE_STATE.S
@@ -0,0 +1,8 @@
+ /*
+ * This handler restores state following a selfVerification memory access.
+ * On entry:
+ * r0 - offset from rSELF to the 1st element of the coreRegs save array.
+ */
+ add r0, r0, rSELF @ pointer to heapArgSpace.coreRegs[0]
+ ldmia r0, {r0-r12}
+ bx lr
diff --git a/vm/compiler/template/armv6j/TEMPLATE_RETURN.S b/vm/compiler/template/armv6j/TEMPLATE_RETURN.S
new file mode 100644
index 000000000..b10afcf3f
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_RETURN.S
@@ -0,0 +1,57 @@
+ /*
+ * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+ * If the stored value in returnAddr
+ * is non-zero, the caller was compiled by the JIT, so return to the
+ * address in the code cache following the invoke instruction. Otherwise
+ * return to the special dvmJitToInterpNoChain entry point.
+ */
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rSELF
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
+ SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
+ ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+ ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+ beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
+ ldr r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+ mov rFP, r10 @ publish new FP
+ ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+ str r2, [rSELF, #offThread_method]@ self->method = newSave->method
+ ldr r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ add rPC, rPC, #6 @ publish new rPC (advance 6 bytes)
+ str r0, [rSELF, #offThread_methodClassDex]
+ cmp r8, #0 @ check the break flags
+ movne r9, #0 @ clear the chaining cell address
+ str r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+ cmp r9, #0 @ chaining cell exists?
+ blxne r9 @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1 @ callsite is interpreted
+1:
+ mov r0, #0
+ str r0, [rSELF, #offThread_inJitCodeCache] @ reset inJitCodeCache
+ stmia rSELF, {rPC, rFP} @ SAVE_PC_FP_TO_SELF()
+ ldr r2, .LdvmMterpStdBail @ defined in footer.S
+ mov r0, rSELF @ Expecting rSELF in r0
+ blx r2 @ exit the interpreter
diff --git a/vm/compiler/template/armv6j/TEMPLATE_RETURN_PROF.S b/vm/compiler/template/armv6j/TEMPLATE_RETURN_PROF.S
new file mode 100644
index 000000000..d7af0bd1f
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_RETURN_PROF.S
@@ -0,0 +1,3 @@
+#define TEMPLATE_INLINE_PROFILING
+%include "armv5te/TEMPLATE_RETURN.S"
+#undef TEMPLATE_INLINE_PROFILING
diff --git a/vm/compiler/template/armv6j/TEMPLATE_SAVE_STATE.S b/vm/compiler/template/armv6j/TEMPLATE_SAVE_STATE.S
new file mode 100644
index 000000000..1c3aa4d3d
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_SAVE_STATE.S
@@ -0,0 +1,21 @@
+ /*
+ * This handler performs a register save for selfVerification mode.
+ * On entry:
+ * Top of stack + 4: r7 value to save
+ * Top of stack + 0: r0 value to save
+ * r0 - offset from rSELF to the beginning of the heapArgSpace record
+ * r7 - the value of regMap
+ *
+ * The handler must save regMap, r0-r12 and then return with r0-r12
+ * with their original values (note that this means r0 and r7 must take
+ * the values on the stack - not the ones in those registers on entry).
+ * Finally, the two registers previously pushed must be popped.
+ */
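+ /*
+ * Resulting layout (sketch): heapArgSpace = { regMap, r0, r1, ..., r12 },
+ * where r0 and r7 are the original values recovered from the stack.
+ */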
+ add r0, r0, rSELF @ pointer to heapArgSpace
+ stmia r0!, {r7} @ save regMap
+ ldr r7, [r13, #0] @ recover r0 value
+ stmia r0!, {r7} @ save r0
+ ldr r7, [r13, #4] @ recover r7 value
+ stmia r0!, {r1-r12}
+ pop {r0, r7} @ recover r0, r7
+ bx lr
diff --git a/vm/compiler/template/armv6j/TEMPLATE_SHL_LONG.S b/vm/compiler/template/armv6j/TEMPLATE_SHL_LONG.S
new file mode 100644
index 000000000..532f8a47a
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_SHL_LONG.S
@@ -0,0 +1,15 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
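+ /*
+ * Equivalent C (sketch): result = (u8)vBB << (vCC & 63);
+ * the orr/movpl pair below handles shift distances of 32..63 without a branch.
+ */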
+ /* shl-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ bx lr
diff --git a/vm/compiler/template/armv6j/TEMPLATE_SHR_LONG.S b/vm/compiler/template/armv6j/TEMPLATE_SHR_LONG.S
new file mode 100644
index 000000000..c73784058
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_SHR_LONG.S
@@ -0,0 +1,15 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
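+ /*
+ * Equivalent C (sketch): result = (s8)vBB >> (vCC & 63), an arithmetic shift.
+ */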
+ /* shr-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ bx lr
diff --git a/vm/compiler/template/armv6j/TEMPLATE_STRING_COMPARETO.S b/vm/compiler/template/armv6j/TEMPLATE_STRING_COMPARETO.S
new file mode 100644
index 000000000..54bde4732
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_STRING_COMPARETO.S
@@ -0,0 +1,133 @@
+ /*
+ * String's compareTo.
+ *
+ * Requires r0/r1 to have been previously checked for null. Will
+ * return a negative value if this string is < comp, 0 if they are the
+ * same, and a positive value if >.
+ *
+ * IMPORTANT NOTE:
+ *
+ * This code relies on hard-coded offsets for string objects, and must be
+ * kept in sync with definitions in UtfString.h. See asm-constants.h
+ *
+ * On entry:
+ * r0: this object pointer
+ * r1: comp object pointer
+ *
+ */
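+ /*
+ * Algorithm sketch (mirrors String.compareTo semantics):
+ *   minCount = min(this.count, comp.count);
+ *   for (i = 0; i < minCount; i++)
+ *       if (this.value[this.offset+i] != comp.value[comp.offset+i])
+ *           return this.value[this.offset+i] - comp.value[comp.offset+i];
+ *   return this.count - comp.count;
+ * The code below unrolls the loop and hands long strings to __memcmp16.
+ */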
+
+ mov r2, r0 @ this to r2, opening up r0 for return value
+ subs r0, r2, r1 @ Same?
+ bxeq lr
+
+ ldr r4, [r2, #STRING_FIELDOFF_OFFSET]
+ ldr r9, [r1, #STRING_FIELDOFF_OFFSET]
+ ldr r7, [r2, #STRING_FIELDOFF_COUNT]
+ ldr r10, [r1, #STRING_FIELDOFF_COUNT]
+ ldr r2, [r2, #STRING_FIELDOFF_VALUE]
+ ldr r1, [r1, #STRING_FIELDOFF_VALUE]
+
+ /*
+ * At this point, we have:
+ * value: r2/r1
+ * offset: r4/r9
+ * count: r7/r10
+ * We're going to compute
+ * r11 <- countDiff
+ * r10 <- minCount
+ */
+ subs r11, r7, r10
+ movls r10, r7
+
+ /* Now, build pointers to the string data */
+ add r2, r2, r4, lsl #1
+ add r1, r1, r9, lsl #1
+ /*
+ * Note: data pointers point to previous element so we can use pre-index
+ * mode with base writeback.
+ */
+ add r2, #16-2 @ offset to contents[-1]
+ add r1, #16-2 @ offset to contents[-1]
+
+ /*
+ * At this point we have:
+ * r2: *this string data
+ * r1: *comp string data
+ * r10: iteration count for comparison
+ * r11: value to return if the first part of the string is equal
+ * r0: reserved for result
+ * r3, r4, r7, r8, r9, r12 available for loading string data
+ */
+
+ subs r10, #2
+ blt do_remainder2
+
+ /*
+ * Unroll the first two checks so we can quickly catch early mismatch
+ * on long strings (but preserve incoming alignment)
+ */
+
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ bxne lr
+ cmp r10, #28
+ bgt do_memcmp16
+ subs r10, #3
+ blt do_remainder
+
+loopback_triple:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ ldrh r9, [r2, #2]!
+ ldrh r12,[r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ subeqs r0, r9, r12
+ bxne lr
+ subs r10, #3
+ bge loopback_triple
+
+do_remainder:
+ adds r10, #3
+ beq returnDiff
+
+loopback_single:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ subs r0, r3, r4
+ bxne lr
+ subs r10, #1
+ bne loopback_single
+
+returnDiff:
+ mov r0, r11
+ bx lr
+
+do_remainder2:
+ adds r10, #2
+ bne loopback_single
+ mov r0, r11
+ bx lr
+
+ /* Long string case */
+do_memcmp16:
+ mov r4, lr
+ ldr lr, .Lmemcmp16
+ mov r7, r11
+ add r0, r2, #2
+ add r1, r1, #2
+ mov r2, r10
+ blx lr
+ cmp r0, #0
+ bxne r4
+ mov r0, r7
+ bx r4
+
+.Lmemcmp16:
+ .word __memcmp16
diff --git a/vm/compiler/template/armv6j/TEMPLATE_STRING_INDEXOF.S b/vm/compiler/template/armv6j/TEMPLATE_STRING_INDEXOF.S
new file mode 100644
index 000000000..bdfdf28f5
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_STRING_INDEXOF.S
@@ -0,0 +1,112 @@
+ /*
+ * String's indexOf.
+ *
+ * Requires r0 to have been previously checked for null. Will
+ * return the index of the first match of r1 in r0, or -1 if none is found.
+ *
+ * IMPORTANT NOTE:
+ *
+ * This code relies on hard-coded offsets for string objects, and must be
+ * kept in sync with definitions in UtfString.h. See asm-constants.h.
+ *
+ * On entry:
+ * r0: string object pointer
+ * r1: char to match
+ * r2: Starting offset in string data
+ */
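+ /*
+ * Algorithm sketch (cf. String.indexOf(int ch, int fromIndex)):
+ *   start = clamp(fromIndex, 0, count);
+ *   for (i = start; i < count; i++)
+ *       if (value[offset + i] == ch) return i;
+ *   return -1;
+ */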
+
+ ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
+ ldr r8, [r0, #STRING_FIELDOFF_COUNT]
+ ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
+ /*
+ * At this point, we have:
+ * r0: object pointer
+ * r1: char to match
+ * r2: starting offset
+ * r7: offset
+ * r8: string length
+ */
+
+ /* Build pointer to start of string data */
+ add r0, #16
+ add r0, r0, r7, lsl #1
+
+ /* Save a copy of starting data in r7 */
+ mov r7, r0
+
+ /* Clamp start to [0..count] */
+ cmp r2, #0
+ movlt r2, #0
+ cmp r2, r8
+ movgt r2, r8
+
+ /* Build pointer to start of data to compare and pre-bias */
+ add r0, r0, r2, lsl #1
+ sub r0, #2
+
+ /* Compute iteration count */
+ sub r8, r2
+
+ /*
+ * At this point we have:
+ * r0: start of data to test
+ * r1: char to compare
+ * r8: iteration count
+ * r7: original start of string
+ * r3, r4, r9, r10, r11, r12 available for loading string data
+ */
+
+ subs r8, #4
+ blt indexof_remainder
+
+indexof_loop4:
+ ldrh r3, [r0, #2]!
+ ldrh r4, [r0, #2]!
+ ldrh r10, [r0, #2]!
+ ldrh r11, [r0, #2]!
+ cmp r3, r1
+ beq match_0
+ cmp r4, r1
+ beq match_1
+ cmp r10, r1
+ beq match_2
+ cmp r11, r1
+ beq match_3
+ subs r8, #4
+ bge indexof_loop4
+
+indexof_remainder:
+ adds r8, #4
+ beq indexof_nomatch
+
+indexof_loop1:
+ ldrh r3, [r0, #2]!
+ cmp r3, r1
+ beq match_3
+ subs r8, #1
+ bne indexof_loop1
+
+indexof_nomatch:
+ mov r0, #-1
+ bx lr
+
+match_0:
+ sub r0, #6
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_1:
+ sub r0, #4
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_2:
+ sub r0, #2
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_3:
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
diff --git a/vm/compiler/template/armv6j/TEMPLATE_THROW_EXCEPTION_COMMON.S b/vm/compiler/template/armv6j/TEMPLATE_THROW_EXCEPTION_COMMON.S
new file mode 100644
index 000000000..b737798d4
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_THROW_EXCEPTION_COMMON.S
@@ -0,0 +1,6 @@
+ /*
+ * Throw an exception from JIT'ed code.
+ * On entry:
+ * r0 Dalvik PC that raises the exception
+ */
+ b .LhandleException
diff --git a/vm/compiler/template/armv6j/TEMPLATE_USHR_LONG.S b/vm/compiler/template/armv6j/TEMPLATE_USHR_LONG.S
new file mode 100644
index 000000000..8a48df23d
--- /dev/null
+++ b/vm/compiler/template/armv6j/TEMPLATE_USHR_LONG.S
@@ -0,0 +1,15 @@
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
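+ /*
+ * Equivalent C (sketch): result = (u8)vBB >> (vCC & 63), a logical shift.
+ */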
+ /* ushr-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ bx lr
diff --git a/vm/compiler/template/armv6j/TemplateOpList.h b/vm/compiler/template/armv6j/TemplateOpList.h
new file mode 100644
index 000000000..abfec4b9f
--- /dev/null
+++ b/vm/compiler/template/armv6j/TemplateOpList.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Dalvik opcode list that uses additional templates to complete JIT execution.
+ */
+#ifndef JIT_TEMPLATE
+#define JIT_TEMPLATE(X)
+#endif
+
+JIT_TEMPLATE(CMP_LONG)
+JIT_TEMPLATE(RETURN)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE)
+JIT_TEMPLATE(CMPG_DOUBLE)
+JIT_TEMPLATE(CMPL_DOUBLE)
+JIT_TEMPLATE(CMPG_FLOAT)
+JIT_TEMPLATE(CMPL_FLOAT)
+JIT_TEMPLATE(MUL_LONG)
+JIT_TEMPLATE(SHL_LONG)
+JIT_TEMPLATE(SHR_LONG)
+JIT_TEMPLATE(USHR_LONG)
+JIT_TEMPLATE(THROW_EXCEPTION_COMMON)
+JIT_TEMPLATE(MEM_OP_DECODE)
+JIT_TEMPLATE(STRING_COMPARETO)
+JIT_TEMPLATE(STRING_INDEXOF)
+JIT_TEMPLATE(INTERPRET)
+JIT_TEMPLATE(MONITOR_ENTER)
+JIT_TEMPLATE(MONITOR_ENTER_DEBUG)
+JIT_TEMPLATE(PERIODIC_PROFILING)
+JIT_TEMPLATE(RETURN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NO_OPT_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_PREDICTED_CHAIN_PROF)
+JIT_TEMPLATE(INVOKE_METHOD_NATIVE_PROF)
diff --git a/vm/compiler/template/armv6j/footer.S b/vm/compiler/template/armv6j/footer.S
new file mode 100644
index 000000000..001b80b2f
--- /dev/null
+++ b/vm/compiler/template/armv6j/footer.S
@@ -0,0 +1,129 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+.LinvokeNative:
+ @ Prep for the native call
+ @ r1 = newFP, r0 = methodToCall
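+ @ Rough flow of the code below (sketch; the argument order shown for the
+ @ native bridge call is an assumption noted here, not generated code):
+ @   if (self->subMode & kSubModeMethodTrace) {
+ @       dvmFastMethodTraceEnter(methodToCall, self);
+ @       methodToCall->nativeFunc(newFP, &self->retval, methodToCall, self);
+ @       dvmFastNativeMethodTraceExit(methodToCall, self);
+ @   } else {
+ @       methodToCall->nativeFunc(newFP, &self->retval, methodToCall, self);
+ @   }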
+ mov r2, #0
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ ldrh lr, [rSELF, #offThread_subMode]
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ r2<- methodToCall
+ mov r0, r1 @ r0<- newFP
+ add r1, rSELF, #offThread_retval @ r1<- &retval
+ mov r3, rSELF @ arg3<- self
+ ands lr, #kSubModeMethodTrace
+ beq 121f @ hop if not profiling
+ @ r2: methodToCall, r6: rSELF
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3}
+
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
+
+ ldmfd sp!, {r0-r1}
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+ b 212f
+121:
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
+212:
+
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ bx r1
+
+/*
+ * On entry:
+ * r0 Faulting Dalvik PC
+ */
+.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
+ mov r2, #0
+ str r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
+ ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
+ ldr rIBASE, .LdvmAsmInstructionStart @ same as above
+ mov rPC, r0 @ reload the faulting Dalvik address
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
+
+ .align 2
+.LdvmAsmInstructionStart:
+ .word dvmAsmInstructionStart
+.LdvmJitToInterpNoChainNoProfile:
+ .word dvmJitToInterpNoChainNoProfile
+.LdvmJitToInterpTraceSelectNoChain:
+ .word dvmJitToInterpTraceSelectNoChain
+.LdvmJitToInterpNoChain:
+ .word dvmJitToInterpNoChain
+.LdvmMterpStdBail:
+ .word dvmMterpStdBail
+.LdvmMterpCommonExceptionThrown:
+ .word dvmMterpCommonExceptionThrown
+.LdvmLockObject:
+ .word dvmLockObject
+.LdvmJitTraceProfilingOff:
+ .word dvmJitTraceProfilingOff
+#if defined(WITH_JIT_TUNING)
+.LdvmICHitCount:
+ .word gDvmICHitCount
+#endif
+#if defined(WITH_SELF_VERIFICATION)
+.LdvmSelfVerificationMemOpDecode:
+ .word dvmSelfVerificationMemOpDecode
+#endif
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastMethodTraceExit:
+ .word dvmFastMethodTraceExit
+.L__aeabi_cdcmple:
+ .word __aeabi_cdcmple
+.L__aeabi_cfcmple:
+ .word __aeabi_cfcmple
+
+ .global dmvCompilerTemplateEnd
+dmvCompilerTemplateEnd:
+
+#endif /* WITH_JIT */
diff --git a/vm/compiler/template/armv6j/header.S b/vm/compiler/template/armv6j/header.S
new file mode 100644
index 000000000..6dcf5b931
--- /dev/null
+++ b/vm/compiler/template/armv6j/header.S
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(WITH_JIT)
+
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+JIT and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF thread pointer
+
+The following registers have fixed assignments in mterp but are scratch
+registers in compiled code
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ sub _reg, _fpreg, #sizeofStackSaveArea
+
+#define EXPORT_PC() \
+ str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../../../mterp/common/asm-constants.h"
diff --git a/vm/compiler/template/armv6j/platform.S b/vm/compiler/template/armv6j/platform.S
new file mode 100644
index 000000000..e0666a57a
--- /dev/null
+++ b/vm/compiler/template/armv6j/platform.S
@@ -0,0 +1,5 @@
+/*
+ * ===========================================================================
+ * CPU-version-specific defines and utility
+ * ===========================================================================
+ */
diff --git a/vm/compiler/template/config-armv6-vfp b/vm/compiler/template/config-armv6-vfp
new file mode 100644
index 000000000..75165b5f5
--- /dev/null
+++ b/vm/compiler/template/config-armv6-vfp
@@ -0,0 +1,68 @@
+
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARMv6-VFP architecture targets.
+#
+
+# file header and basic definitions
+#import c/header.c
+import armv5te/header.S
+
+# C pre-processor defines for stub C instructions
+#import cstubs/stubdefs.c
+
+# highly-platform-specific defs
+import armv5te-vfp/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+#import c/opcommon.c
+
+# opcode list; argument to op-start is default directory
+op-start armv5te-vfp
+ op TEMPLATE_CMP_LONG armv5te
+ op TEMPLATE_INVOKE_METHOD_CHAIN armv5te
+ op TEMPLATE_INVOKE_METHOD_NATIVE armv5te
+ op TEMPLATE_INVOKE_METHOD_NO_OPT armv5te
+ op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN armv5te
+ op TEMPLATE_MUL_LONG armv5te
+ op TEMPLATE_RETURN armv5te
+ op TEMPLATE_SHL_LONG armv5te
+ op TEMPLATE_SHR_LONG armv5te
+ op TEMPLATE_USHR_LONG armv5te
+ op TEMPLATE_THROW_EXCEPTION_COMMON armv5te
+ op TEMPLATE_STRING_COMPARETO armv5te
+ op TEMPLATE_STRING_INDEXOF armv5te
+ op TEMPLATE_INTERPRET armv5te
+ op TEMPLATE_MONITOR_ENTER armv5te
+ op TEMPLATE_MONITOR_ENTER_DEBUG armv5te
+ op TEMPLATE_PERIODIC_PROFILING armv5te
+ op TEMPLATE_INVOKE_METHOD_CHAIN_PROF armv5te
+ op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF armv5te
+ op TEMPLATE_INVOKE_METHOD_NATIVE_PROF armv5te
+ op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF armv5te
+ op TEMPLATE_RETURN_PROF armv5te
+
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+##import c/gotoTargets.c
+
+# end of defs; include this when cstubs/stubdefs.c is included
+#import cstubs/enddefs.c
+
+# common subroutines for asm
+import armv5te/footer.S
diff --git a/vm/compiler/template/config-armv6j b/vm/compiler/template/config-armv6j
new file mode 100644
index 000000000..feb63c1a8
--- /dev/null
+++ b/vm/compiler/template/config-armv6j
@@ -0,0 +1,68 @@
+
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARMv6J architecture targets.
+#
+
+# file header and basic definitions
+#import c/header.c
+import armv5te/header.S
+
+# C pre-processor defines for stub C instructions
+#import cstubs/stubdefs.c
+
+# highly-platform-specific defs
+import armv5te-vfp/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+#import c/opcommon.c
+
+# opcode list; argument to op-start is default directory
+op-start armv5te
+ op TEMPLATE_CMP_LONG armv5te
+ op TEMPLATE_INVOKE_METHOD_CHAIN armv5te
+ op TEMPLATE_INVOKE_METHOD_NATIVE armv5te
+ op TEMPLATE_INVOKE_METHOD_NO_OPT armv5te
+ op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN armv5te
+ op TEMPLATE_MUL_LONG armv5te
+ op TEMPLATE_RETURN armv5te
+ op TEMPLATE_SHL_LONG armv5te
+ op TEMPLATE_SHR_LONG armv5te
+ op TEMPLATE_USHR_LONG armv5te
+ op TEMPLATE_THROW_EXCEPTION_COMMON armv5te
+ op TEMPLATE_STRING_COMPARETO armv5te
+ op TEMPLATE_STRING_INDEXOF armv5te
+ op TEMPLATE_INTERPRET armv5te
+ op TEMPLATE_MONITOR_ENTER armv5te
+ op TEMPLATE_MONITOR_ENTER_DEBUG armv5te
+ op TEMPLATE_PERIODIC_PROFILING armv5te
+ op TEMPLATE_INVOKE_METHOD_CHAIN_PROF armv5te
+ op TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF armv5te
+ op TEMPLATE_INVOKE_METHOD_NATIVE_PROF armv5te
+ op TEMPLATE_INVOKE_METHOD_NO_OPT_PROF armv5te
+ op TEMPLATE_RETURN_PROF armv5te
+
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+##import c/gotoTargets.c
+
+# end of defs; include this when cstubs/stubdefs.c is included
+#import cstubs/enddefs.c
+
+# common subroutines for asm
+import armv5te/footer.S
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
index 331d90213..93a677e4f 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te-vfp.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1623,7 +1623,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1676,8 +1676,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1807,8 +1807,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1817,8 +1817,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1936,7 +1936,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
index 044843e1c..b9de01fc6 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv5te.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -514,8 +514,8 @@ dvmCompiler_TEMPLATE_CMPG_DOUBLE:
/* op vAA, vBB, vCC */
push {r0-r3} @ save operands
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
bhi .LTEMPLATE_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r1<- -1
moveq r0, #0 @ (equal) r1<- 0, trumps less than
@@ -528,8 +528,8 @@ dvmCompiler_TEMPLATE_CMPG_DOUBLE:
.LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
pop {r2-r3} @ restore operands in reverse order
pop {r0-r1} @ restore operands in reverse order
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mov r0, #1 @ r1<- 1 or -1 for NaN
@@ -558,8 +558,8 @@ dvmCompiler_TEMPLATE_CMPL_DOUBLE:
/* op vAA, vBB, vCC */
push {r0-r3} @ save operands
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
bhi .LTEMPLATE_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r1<- -1
moveq r0, #0 @ (equal) r1<- 0, trumps less than
@@ -572,8 +572,8 @@ dvmCompiler_TEMPLATE_CMPL_DOUBLE:
.LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
pop {r2-r3} @ restore operands in reverse order
pop {r0-r1} @ restore operands in reverse order
- mov lr, pc
- ldr pc, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mvn r0, #0 @ r1<- 1 or -1 for NaN
@@ -622,8 +622,8 @@ dvmCompiler_TEMPLATE_CMPG_FLOAT:
mov r9, r0 @ Save copies - we may need to redo
mov r10, r1
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
bhi .LTEMPLATE_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0, trumps less than
@@ -634,8 +634,8 @@ dvmCompiler_TEMPLATE_CMPG_FLOAT:
.LTEMPLATE_CMPG_FLOAT_gt_or_nan:
mov r0, r10 @ restore in reverse order
mov r1, r9
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mov r0, #1 @ r1<- 1 or -1 for NaN
@@ -684,8 +684,8 @@ dvmCompiler_TEMPLATE_CMPL_FLOAT:
mov r9, r0 @ Save copies - we may need to redo
mov r10, r1
mov r11, lr @ save return address
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
bhi .LTEMPLATE_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
mvncc r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0, trumps less than
@@ -696,8 +696,8 @@ dvmCompiler_TEMPLATE_CMPL_FLOAT:
.LTEMPLATE_CMPL_FLOAT_gt_or_nan:
mov r0, r10 @ restore in reverse order
mov r1, r9
- mov lr, pc
- ldr pc, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
movcc r0, #1 @ (greater than) r1<- 1
bxcc r11
mvn r0, #0 @ r1<- 1 or -1 for NaN
@@ -1239,8 +1239,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1345,8 +1345,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1354,7 +1354,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1407,8 +1407,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1538,8 +1538,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1548,8 +1548,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1611,20 +1611,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1650,7 +1650,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1667,7 +1667,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv6-vfp.S b/vm/compiler/template/out/CompilerTemplateAsm-armv6-vfp.S
new file mode 100644
index 000000000..737ef751f
--- /dev/null
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv6-vfp.S
@@ -0,0 +1,1981 @@
+/*
+ * This file was generated automatically by gen-template.py for 'armv6-vfp'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: armv5te/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(WITH_JIT)
+
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+JIT and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF thread pointer
+
+The following registers have fixed assignments in mterp but are scratch
+registers in compiled code
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ sub _reg, _fpreg, #sizeofStackSaveArea
+
+#define EXPORT_PC() \
+ str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../../../mterp/common/asm-constants.h"
+
+/* File: armv5te-vfp/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines and utility
+ * ===========================================================================
+ */
+
+
+ .global dvmCompilerTemplateStart
+ .type dvmCompilerTemplateStart, %function
+ .text
+
+dvmCompilerTemplateStart:
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMP_LONG
+dvmCompiler_TEMPLATE_CMP_LONG:
+/* File: armv5te/TEMPLATE_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .LTEMPLATE_CMP_LONG_less @ signed compare on high part
+ bgt .LTEMPLATE_CMP_LONG_greater
+ subs r0, r0, r2 @ r0<- r0 - r2
+ bxeq lr
+ bhi .LTEMPLATE_CMP_LONG_greater @ unsigned compare on low part
+.LTEMPLATE_CMP_LONG_less:
+ mvn r0, #0 @ r0<- -1
+ bx lr
+.LTEMPLATE_CMP_LONG_greater:
+ mov r0, #1 @ r0<- 1
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_RETURN
+dvmCompiler_TEMPLATE_RETURN:
+/* File: armv5te/TEMPLATE_RETURN.S */
+ /*
+ * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+ * If the stored value in returnAddr
+ * is non-zero, the caller is compiled by the JIT thus return to the
+ * address in the code cache following the invoke instruction. Otherwise
+ * return to the special dvmJitToInterpNoChain entry point.
+ */
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rSELF
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
+ SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
+ ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+ ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+ beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
+ ldr r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+ mov rFP, r10 @ publish new FP
+ ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+ str r2, [rSELF, #offThread_method]@ self->method = newSave->method
+ ldr r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ add rPC, rPC, #6 @ publish new rPC (advance 6 bytes)
+ str r0, [rSELF, #offThread_methodClassDex]
+ cmp r8, #0 @ check the break flags
+ movne r9, #0 @ clear the chaining cell address
+ str r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+ cmp r9, #0 @ chaining cell exists?
+ blxne r9 @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1 @ callsite is interpreted
+1:
+ mov r0, #0
+ str r0, [rSELF, #offThread_inJitCodeCache] @ reset inJitCodeCache
+ stmia rSELF, {rPC, rFP} @ SAVE_PC_FP_TO_SELF()
+ ldr r2, .LdvmMterpStdBail @ defined in footer.S
+ mov r0, rSELF @ Expecting rSELF in r0
+ blx r2 @ exit the interpreter
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+ /*
+ * For polymorphic callsites - set up the Dalvik frame and load the Dalvik PC
+ * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+ * runtime-resolved callee.
+ */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ ldrh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldrh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ ldr r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+ ldr rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns
+
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne lr @ bail to the interpreter
+ tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+ bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
+
+ ldr r10, .LdvmJitToInterpTraceSelectNoChain
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kInlineCacheMiss
+#endif
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
+
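+ /*
+ * Illustrative sketch (not part of the generated template): the frame
+ * construction and overflow check shared by the invoke templates, in C.
+ * Names are hypothetical.
+ *
+ *   newFp  = (u4*) SAVEAREA_FROM_FP(fp) - method->registersSize;
+ *   bottom = (u4*) SAVEAREA_FROM_FP(newFp) - method->outsSize;
+ *   if (bottom < self->interpStackEnd)
+ *       return;                           // caller raises StackOverflowError
+ *   newSave = SAVEAREA_FROM_FP(newFp);
+ *   newSave->prevFrame  = fp;
+ *   newSave->savedPc    = pc;
+ *   newSave->returnAddr = returnCell + 1; // Thumb address is odd
+ *   newSave->method     = method;
+ */
+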
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+ /*
+ * For a monomorphic callsite, set up the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ */
+ @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+ @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+ @ methodToCall is guaranteed to be non-native
+.LinvokeChain:
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ add r12, lr, #2 @ setup the punt-to-interp address
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo r12 @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne r12 @ bail to the interpreter
+
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
+
+ bx lr @ return to the callee-chaining cell
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+ /*
+ * For polymorphic callsite, check whether the cached class pointer matches
+ * the current one. If so, set up the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ *
+ * The predicted chaining cell is declared in ArmLIR.h with the
+ * following layout:
+ *
+ * typedef struct PredictedChainingCell {
+ * u4 branch;
+ * const ClassObject *clazz;
+ * const Method *method;
+ * u4 counter;
+ * } PredictedChainingCell;
+ *
+ * Upon returning to the callsite:
+ * - lr : to branch to the chaining cell
+ * - lr+2: to punt to the interpreter
+ * - lr+4: to fully resolve the callee and possibly rechain.
+ * r3 <- class
+ * r9 <- counter
+ */
+ @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+ ldr r3, [r0, #offObject_clazz] @ r3 <- this->class
+ ldr r8, [r2, #4] @ r8 <- predictedChainCell->clazz
+ ldr r0, [r2, #8] @ r0 <- predictedChainCell->method
+ ldr r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
+ cmp r3, r8 @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+ ldr r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ bne 101f
+ ldr r10, [r7, #0]
+101:
+#else
+ ldreq r10, [r7, #0]
+#endif
+ add r10, r10, #1
+ streq r10, [r7, #0]
+#endif
+ ldreqh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldreqh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ beq .LinvokeChain @ predicted chain is valid
+ ldr r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+ cmp r8, #0 @ initialized class or not
+ moveq r1, #0
+ subne r1, r9, #1 @ count--
+ strne r1, [rSELF, #offThread_icRechainCount] @ write back to thread
+ add lr, lr, #4 @ return to fully-resolve landing pad
+ /*
+ * r1 <- count
+ * r2 <- &predictedChainCell
+ * r3 <- this->class
+ * r4 <- dPC
+ * r7 <- this->class->vtable
+ */
+ bx lr
+
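+ /*
+ * Illustrative sketch (not part of the generated template): the
+ * predicted-chaining check above, approximated in C. Field and helper
+ * names are hypothetical.
+ *
+ *   if (this->clazz == cell->clazz) {
+ *       // hit: fall through to .LinvokeChain with the cached method
+ *       invokeChained(cell->method);
+ *   } else {
+ *       // miss: unless the cell was never initialized, burn one unit of
+ *       // the shared rechain budget, then return to the lr+4 landing pad
+ *       // so the callee is fully resolved (and possibly rechained).
+ *       if (cell->clazz != NULL)
+ *           self->icRechainCount--;
+ *   }
+ */
+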
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ @ r7 = methodToCall->registersSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+ bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
+
+ @ go ahead and transfer control to the native code
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ mov r2, #0
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ arg2<- methodToCall
+ mov r0, r1 @ arg0<- newFP
+ add r1, rSELF, #offThread_retval @ arg1<- &retval
+ mov r3, rSELF @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rSELF
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ blx r8 @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+#endif
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1
+
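+ /*
+ * Illustrative sketch (not part of the generated template): the native
+ * call sequence above, in C. Names are hypothetical.
+ *
+ *   self->curFrame = newFp;
+ *   newSave->localRefCookie = self->jniLocal.topCookie;
+ *   method->nativeFunc(newFp, &self->retval, method, self);
+ *   self->jniLocal.topCookie = newSave->localRefCookie;  // pop JNI locals
+ *   self->curFrame = fp;                                 // back to caller
+ *   if (self->exception != NULL)
+ *       goto handleException;
+ *   else if (newSave->returnAddr != NULL)
+ *       goto *newSave->returnAddr;           // still-chained JIT caller
+ *   else
+ *       dvmJitToInterpTraceSelectNoChain();  // next insn via the interpreter
+ */
+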
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MUL_LONG
+dvmCompiler_TEMPLATE_MUL_LONG:
+/* File: armv5te/TEMPLATE_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * For JIT: op1 in r0/r1, op2 in r2/r3, return in r0/r1
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ *       WX
+ *     x YZ
+ * ---------
+ *    ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ mov r0,r9
+ mov r1,r10
+ bx lr
+
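+ /*
+ * Illustrative sketch (not part of the generated template): the
+ * partial-product arithmetic above, in C. Names are hypothetical.
+ *
+ *   // op1 = W:X (r1:r0), op2 = Y:Z (r3:r2)
+ *   uint64_t lo = (uint64_t) Z * X;       // umull
+ *   uint32_t hi = Z * W + Y * X;          // mul + mla; YW lies above bit 63
+ *   result = lo + ((uint64_t) hi << 32);  // fold carries into the high word
+ */
+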
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_SHL_LONG
+dvmCompiler_TEMPLATE_SHL_LONG:
+/* File: armv5te/TEMPLATE_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
+ /* shl-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ bx lr
+
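+ /*
+ * Illustrative sketch (not part of the generated template): the two-word
+ * shift computed above, in C; shr-long and ushr-long below are the
+ * mirrored forms. Names are hypothetical.
+ *
+ *   n &= 63;
+ *   if (n >= 32) {
+ *       hi = lo << (n - 32);
+ *       lo = 0;
+ *   } else if (n != 0) {
+ *       hi = (hi << n) | (lo >> (32 - n));
+ *       lo <<= n;
+ *   }
+ */
+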
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_SHR_LONG
+dvmCompiler_TEMPLATE_SHR_LONG:
+/* File: armv5te/TEMPLATE_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
+ /* shr-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_USHR_LONG
+dvmCompiler_TEMPLATE_USHR_LONG:
+/* File: armv5te/TEMPLATE_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_ADD_FLOAT_VFP
+dvmCompiler_TEMPLATE_ADD_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_ADD_FLOAT_VFP.S */
+/* File: armv5te-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ flds s0,[r1]
+ flds s1,[r2]
+ fadds s2, s0, s1
+ fsts s2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_SUB_FLOAT_VFP
+dvmCompiler_TEMPLATE_SUB_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_SUB_FLOAT_VFP.S */
+/* File: armv5te-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ flds s0,[r1]
+ flds s1,[r2]
+ fsubs s2, s0, s1
+ fsts s2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MUL_FLOAT_VFP
+dvmCompiler_TEMPLATE_MUL_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_MUL_FLOAT_VFP.S */
+/* File: armv5te-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ flds s0,[r1]
+ flds s1,[r2]
+ fmuls s2, s0, s1
+ fsts s2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_DIV_FLOAT_VFP
+dvmCompiler_TEMPLATE_DIV_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_DIV_FLOAT_VFP.S */
+/* File: armv5te-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ flds s0,[r1]
+ flds s1,[r2]
+ fdivs s2, s0, s1
+ fsts s2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP
+dvmCompiler_TEMPLATE_ADD_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_ADD_DOUBLE_VFP.S */
+/* File: armv5te-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ fldd d0,[r1]
+ fldd d1,[r2]
+ faddd d2, d0, d1
+ fstd d2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP
+dvmCompiler_TEMPLATE_SUB_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_SUB_DOUBLE_VFP.S */
+/* File: armv5te-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ fldd d0,[r1]
+ fldd d1,[r2]
+ fsubd d2, d0, d1
+ fstd d2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP
+dvmCompiler_TEMPLATE_MUL_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_MUL_DOUBLE_VFP.S */
+/* File: armv5te-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ fldd d0,[r1]
+ fldd d1,[r2]
+ fmuld d2, d0, d1
+ fstd d2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP
+dvmCompiler_TEMPLATE_DIV_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_DIV_DOUBLE_VFP.S */
+/* File: armv5te-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit floating point operation. Provide an "instr" line that
+ * specifies an instruction that performs s2 = s0 op s1.
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = op1 address
+ * r2 = op2 address
+ */
+ fldd d0,[r1]
+ fldd d1,[r2]
+ fdivd d2, d0, d1
+ fstd d2,[r0]
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP
+dvmCompiler_TEMPLATE_DOUBLE_TO_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_DOUBLE_TO_FLOAT_VFP.S */
+/* File: armv5te-vfp/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ fldd d0, [r1] @ d0<- vB
+ fcvtsd s0, d0 @ s0<- op d0
+ fsts s0, [r0] @ vA<- s0
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP
+dvmCompiler_TEMPLATE_DOUBLE_TO_INT_VFP:
+/* File: armv5te-vfp/TEMPLATE_DOUBLE_TO_INT_VFP.S */
+/* File: armv5te-vfp/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ fldd d0, [r1] @ d0<- vB
+ ftosizd s0, d0 @ s0<- op d0
+ fsts s0, [r0] @ vA<- s0
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP
+dvmCompiler_TEMPLATE_FLOAT_TO_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_FLOAT_TO_DOUBLE_VFP.S */
+/* File: armv5te-vfp/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ flds s0, [r1] @ s0<- vB
+ fcvtds d0, s0 @ d0<- op s0
+ fstd d0, [r0] @ vA<- d0
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP
+dvmCompiler_TEMPLATE_FLOAT_TO_INT_VFP:
+/* File: armv5te-vfp/TEMPLATE_FLOAT_TO_INT_VFP.S */
+/* File: armv5te-vfp/funop.S */
+ /*
+ * Generic 32bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s1 = op s0".
+ *
+ * For: float-to-int, int-to-float
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ flds s0, [r1] @ s0<- vB
+ ftosizs s1, s0 @ s1<- op s0
+ fsts s1, [r0] @ vA<- s1
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP
+dvmCompiler_TEMPLATE_INT_TO_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_INT_TO_DOUBLE_VFP.S */
+/* File: armv5te-vfp/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ flds s0, [r1] @ s0<- vB
+ fsitod d0, s0 @ d0<- op s0
+ fstd d0, [r0] @ vA<- d0
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP
+dvmCompiler_TEMPLATE_INT_TO_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_INT_TO_FLOAT_VFP.S */
+/* File: armv5te-vfp/funop.S */
+ /*
+ * Generic 32bit-to-32bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "s1 = op s0".
+ *
+ * For: float-to-int, int-to-float
+ *
+ * On entry:
+ * r0 = target dalvik register address
+ * r1 = src dalvik register address
+ */
+ /* unop vA, vB */
+ flds s0, [r1] @ s0<- vB
+ fsitos s1, s0 @ s1<- op s0
+ fsts s1, [r0] @ vA<- s1
+ bx lr
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP
+dvmCompiler_TEMPLATE_CMPG_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_CMPG_DOUBLE_VFP.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ *
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ fldd d0, [r0] @ d0<- vBB
+ fldd d1, [r1] @ d1<- vCC
+ fcmpd d0, d1 @ compare (vBB, vCC)
+ mov r0, #1 @ r0<- 1 (default)
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP
+dvmCompiler_TEMPLATE_CMPL_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_CMPL_DOUBLE_VFP.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ fldd d0, [r0] @ d0<- vBB
+ fldd d1, [r1] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ mvn r0, #0 @ r0<- -1 (default)
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP
+dvmCompiler_TEMPLATE_CMPG_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_CMPG_FLOAT_VFP.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ flds s0, [r0] @ s0<- vBB
+ flds s1, [r1] @ s1<- vCC
+ fcmps s0, s1 @ compare (vBB, vCC)
+ mov r0, #1 @ r0<- 1 (default)
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP
+dvmCompiler_TEMPLATE_CMPL_FLOAT_VFP:
+/* File: armv5te-vfp/TEMPLATE_CMPL_FLOAT_VFP.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ * On entry:
+ * r0 = &op1 [vBB]
+ * r1 = &op2 [vCC]
+ */
+ /* op vAA, vBB, vCC */
+ flds s0, [r0] @ s0<- vBB
+ flds s1, [r1] @ s1<- vCC
+ fcmps s0, s1 @ compare (vBB, vCC)
+ mvn r0, #0 @ r0<- -1 (default)
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP
+dvmCompiler_TEMPLATE_SQRT_DOUBLE_VFP:
+/* File: armv5te-vfp/TEMPLATE_SQRT_DOUBLE_VFP.S */
+ /*
+ * 64-bit floating point vfp sqrt operation.
+ * If the result is a NaN, bail out to library code to do
+ * the right thing.
+ *
+ * On entry:
+ * r2 src addr of op1
+ * On exit:
+ * r0,r1 = res
+ */
+ fldd d0, [r2]
+ fsqrtd d1, d0
+ fcmpd d1, d1
+ fmstat
+ fmrrd r0, r1, d1
+ bxeq lr @ Result OK - return
+ ldr r2, .Lsqrt
+ fmrrd r0, r1, d0 @ reload orig operand
+ bx r2 @ tail call to sqrt library routine
+
+.Lsqrt:
+ .word sqrt
+
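+ /*
+ * Illustrative sketch (not part of the generated template): the fast
+ * path / fallback split above, in C. hardwareSqrt() stands in for fsqrtd.
+ *
+ *   double r = hardwareSqrt(x);
+ *   if (r != r)          // NaN result, e.g. x < 0 or x is itself NaN
+ *       r = sqrt(x);     // let the libm routine produce the proper bits
+ *   return r;
+ */
+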
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
+dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
+/* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
+ /*
+ * Throw an exception from JIT'ed code.
+ * On entry:
+ * r0 Dalvik PC that raises the exception
+ */
+ b .LhandleException
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MEM_OP_DECODE
+dvmCompiler_TEMPLATE_MEM_OP_DECODE:
+/* File: armv5te-vfp/TEMPLATE_MEM_OP_DECODE.S */
+#if defined(WITH_SELF_VERIFICATION)
+ /*
+ * This handler encapsulates heap memory ops for selfVerification mode.
+ *
+ * The call to the handler is inserted prior to a heap memory operation.
+ * This handler then calls a function to decode the memory op, and process
+ * it accordingly. Afterwards, the handler changes the return address to
+ * skip the memory op so it never gets executed.
+ */
+ vpush {d0-d15} @ save out all fp registers
+ push {r0-r12,lr} @ save out all registers
+ ldr r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
+ mov r0, lr @ arg0 <- link register
+ mov r1, sp @ arg1 <- stack pointer
+ blx r2 @ decode and handle the mem op
+ pop {r0-r12,lr} @ restore all registers
+ vpop {d0-d15} @ restore all fp registers
+ bx lr @ return to compiled code
+#endif
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_STRING_COMPARETO
+dvmCompiler_TEMPLATE_STRING_COMPARETO:
+/* File: armv5te/TEMPLATE_STRING_COMPARETO.S */
+ /*
+ * String's compareTo.
+ *
+ * Requires r0/r1 to have been previously checked for null. Returns a
+ * negative value if this string is < comp, 0 if they are the same, and
+ * a positive value if this string is > comp.
+ *
+ * IMPORTANT NOTE:
+ *
+ * This code relies on hard-coded offsets for string objects, and must be
+ * kept in sync with definitions in UtfString.h. See asm-constants.h
+ *
+ * On entry:
+ * r0: this object pointer
+ * r1: comp object pointer
+ *
+ */
+
+ mov r2, r0 @ this to r2, opening up r0 for return value
+ subs r0, r2, r1 @ Same?
+ bxeq lr
+
+ ldr r4, [r2, #STRING_FIELDOFF_OFFSET]
+ ldr r9, [r1, #STRING_FIELDOFF_OFFSET]
+ ldr r7, [r2, #STRING_FIELDOFF_COUNT]
+ ldr r10, [r1, #STRING_FIELDOFF_COUNT]
+ ldr r2, [r2, #STRING_FIELDOFF_VALUE]
+ ldr r1, [r1, #STRING_FIELDOFF_VALUE]
+
+ /*
+ * At this point, we have:
+ * value: r2/r1
+ * offset: r4/r9
+ * count: r7/r10
+ * We're going to compute
+ * r11 <- countDiff
+ * r10 <- minCount
+ */
+ subs r11, r7, r10
+ movls r10, r7
+
+ /* Now, build pointers to the string data */
+ add r2, r2, r4, lsl #1
+ add r1, r1, r9, lsl #1
+ /*
+ * Note: data pointers point to previous element so we can use pre-index
+ * mode with base writeback.
+ */
+ add r2, #16-2 @ offset to contents[-1]
+ add r1, #16-2 @ offset to contents[-1]
+
+ /*
+ * At this point we have:
+ * r2: *this string data
+ * r1: *comp string data
+ * r10: iteration count for comparison
+ * r11: value to return if the first part of the string is equal
+ * r0: reserved for result
+ * r3, r4, r7, r8, r9, r12 available for loading string data
+ */
+
+ subs r10, #2
+ blt do_remainder2
+
+ /*
+ * Unroll the first two checks so we can quickly catch early mismatch
+ * on long strings (but preserve incoming alignment)
+ */
+
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ bxne lr
+ cmp r10, #28
+ bgt do_memcmp16
+ subs r10, #3
+ blt do_remainder
+
+loopback_triple:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ ldrh r9, [r2, #2]!
+ ldrh r12,[r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ subeqs r0, r9, r12
+ bxne lr
+ subs r10, #3
+ bge loopback_triple
+
+do_remainder:
+ adds r10, #3
+ beq returnDiff
+
+loopback_single:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ subs r0, r3, r4
+ bxne lr
+ subs r10, #1
+ bne loopback_single
+
+returnDiff:
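+ /*
+ * Illustrative sketch (not part of the generated template): the unwind
+ * performed above, in C. Names are hypothetical.
+ *
+ *   saveArea = SAVEAREA_FROM_FP(fp);
+ *   caller   = SAVEAREA_FROM_FP(saveArea->prevFrame)->method;
+ *   if (caller == NULL)                       // break frame:
+ *       dvmMterpStdBail(self);                // leave compiled code
+ *   fp = saveArea->prevFrame;                 // publish the caller's frame
+ *   pc = saveArea->savedPc + 3;               // u2 units: skip the invoke
+ *   if (!breakFlags && saveArea->returnAddr)
+ *       goto *saveArea->returnAddr;           // chained JIT caller
+ *   else
+ *       dvmJitToInterpNoChainNoProfile();     // resume in the interpreter
+ */
+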
+ mov r0, r11
+ bx lr
+
+do_remainder2:
+ adds r10, #2
+ bne loopback_single
+ mov r0, r11
+ bx lr
+
+ /* Long string case */
+do_memcmp16:
+ mov r4, lr
+ ldr lr, .Lmemcmp16
+ mov r7, r11
+ add r0, r2, #2
+ add r1, r1, #2
+ mov r2, r10
+ blx lr
+ cmp r0, #0
+ bxne r4
+ mov r0, r7
+ bx r4
+
+.Lmemcmp16:
+ .word __memcmp16
+
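+ /*
+ * Illustrative sketch (not part of the generated template): the overall
+ * compareTo logic above, in C. memcmp16() stands for __memcmp16; the
+ * other names are hypothetical.
+ *
+ *   if (this == comp) return 0;
+ *   countDiff = this->count - comp->count;
+ *   minCount  = MIN(this->count, comp->count);
+ *   for (i = 0; i < minCount; i++) {      // unrolled x2/x3 above, and
+ *       d = thisData[i] - compData[i];    // handed to memcmp16 for long
+ *       if (d != 0) return d;             // runs (> 28 chars)
+ *   }
+ *   return countDiff;
+ */
+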
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_STRING_INDEXOF
+dvmCompiler_TEMPLATE_STRING_INDEXOF:
+/* File: armv5te/TEMPLATE_STRING_INDEXOF.S */
+ /*
+ * String's indexOf.
+ *
+ * Requires r0 to have been previously checked for null. Returns the
+ * index of the first match of r1 in r0, or -1 if there is no match.
+ *
+ * IMPORTANT NOTE:
+ *
+ * This code relies on hard-coded offsets for string objects, and must be
+ * kept in sync with definitions in UtfString.h. See asm-constants.h
+ *
+ * On entry:
+ * r0: string object pointer
+ * r1: char to match
+ * r2: Starting offset in string data
+ */
+
+ ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
+ ldr r8, [r0, #STRING_FIELDOFF_COUNT]
+ ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
+ /*
+ * At this point, we have:
+ * r0: object pointer
+ * r1: char to match
+ * r2: starting offset
+ * r7: offset
+ * r8: string length
+ */
+
+ /* Build pointer to start of string data */
+ add r0, #16
+ add r0, r0, r7, lsl #1
+
+ /* Save a copy of starting data in r7 */
+ mov r7, r0
+
+ /* Clamp start to [0..count] */
+ cmp r2, #0
+ movlt r2, #0
+ cmp r2, r8
+ movgt r2, r8
+
+ /* Build pointer to start of data to compare and pre-bias */
+ add r0, r0, r2, lsl #1
+ sub r0, #2
+
+ /* Compute iteration count */
+ sub r8, r2
+
+ /*
+ * At this point we have:
+ * r0: start of data to test
+ * r1: char to compare
+ * r8: iteration count
+ * r7: original start of string
+ * r3, r4, r9, r10, r11, r12 available for loading string data
+ */
+
+ subs r8, #4
+ blt indexof_remainder
+
+indexof_loop4:
+ ldrh r3, [r0, #2]!
+ ldrh r4, [r0, #2]!
+ ldrh r10, [r0, #2]!
+ ldrh r11, [r0, #2]!
+ cmp r3, r1
+ beq match_0
+ cmp r4, r1
+ beq match_1
+ cmp r10, r1
+ beq match_2
+ cmp r11, r1
+ beq match_3
+ subs r8, #4
+ bge indexof_loop4
+
+indexof_remainder:
+ adds r8, #4
+ beq indexof_nomatch
+
+indexof_loop1:
+ ldrh r3, [r0, #2]!
+ cmp r3, r1
+ beq match_3
+ subs r8, #1
+ bne indexof_loop1
+
+indexof_nomatch:
+ mov r0, #-1
+ bx lr
+
+match_0:
+ sub r0, #6
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_1:
+ sub r0, #4
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_2:
+ sub r0, #2
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_3:
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+
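+ /*
+ * Illustrative sketch (not part of the generated template): the indexOf
+ * logic above, in C. Names are hypothetical.
+ *
+ *   start = CLAMP(start, 0, count);
+ *   for (i = start; i < count; i++)       // unrolled x4 above; match_N
+ *       if (data[i] == ch)                // recovers i from the data
+ *           return i;                     // pointer
+ *   return -1;
+ */
+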
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INTERPRET
+dvmCompiler_TEMPLATE_INTERPRET:
+/* File: armv5te/TEMPLATE_INTERPRET.S */
+ /*
+ * This handler transfers control to the interpreter without performing
+ * any lookups. It may be called either as part of a normal chaining
+ * operation, or from the transition code in header.S. We distinguish
+ * the two cases by looking at the link register. If called from a
+ * translation chain, it will point to the chaining Dalvik PC -3.
+ * On entry:
+ * lr - if NULL:
+ * r1 - the Dalvik PC to begin interpretation.
+ * else
+ * [lr, #3] contains Dalvik PC to begin interpretation
+ * rSELF - pointer to thread
+ * rFP - Dalvik frame pointer
+ */
+ cmp lr, #0
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ beq 101f
+ ldr r1,[lr, #3]
+101:
+#else
+ ldrne r1,[lr, #3]
+#endif
+ ldr r2, .LinterpPunt
+ mov r0, r1 @ set Dalvik PC
+ bx r2
+ @ doesn't return
+
+.LinterpPunt:
+ .word dvmJitToInterpPunt
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MONITOR_ENTER
+dvmCompiler_TEMPLATE_MONITOR_ENTER:
+/* File: armv5te/TEMPLATE_MONITOR_ENTER.S */
+ /*
+ * Call out to the runtime to lock an object. Because this thread
+ * may have been suspended in THREAD_MONITOR state and the Jit's
+ * translation cache subsequently cleared, we cannot return directly.
+ * Instead, unconditionally transition to the interpreter to resume.
+ *
+ * On entry:
+ * r0 - self pointer
+ * r1 - the object (which has already been null-checked by the caller)
+ * r4 - the Dalvik PC of the following instruction.
+ */
+ ldr r2, .LdvmLockObject
+ mov r3, #0 @ Record that we're not returning
+ str r3, [r0, #offThread_inJitCodeCache]
+ blx r2 @ dvmLockObject(self, obj)
+ ldr r2, .LdvmJitToInterpNoChain
+ @ Bail to interpreter - no chain [note - r4 still contains rPC]
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kHeavyweightMonitor
+#endif
+ bx r2
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG
+dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG:
+/* File: armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S */
+ /*
+ * To support deadlock prediction, this version of MONITOR_ENTER
+ * will always call the heavyweight dvmLockObject, check for an
+ * exception and then bail out to the interpreter.
+ *
+ * On entry:
+ * r0 - self pointer
+ * r1 - the object (which has already been null-checked by the caller)
+ * r4 - the Dalvik PC of the following instruction.
+ *
+ */
+ ldr r2, .LdvmLockObject
+ mov r3, #0 @ Record that we're not returning
+ str r3, [r0, #offThread_inJitCodeCache]
+ blx r2 @ dvmLockObject(self, obj)
+ @ test for exception
+ ldr r1, [rSELF, #offThread_exception]
+ cmp r1, #0
+ beq 1f
+ ldr r2, .LhandleException
+ sub r0, r4, #2 @ roll dPC back to this monitor instruction
+ bx r2
+1:
+ @ Bail to interpreter - no chain [note - r4 still contains rPC]
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kHeavyweightMonitor
+#endif
+ ldr pc, .LdvmJitToInterpNoChain
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
+/* File: armv5te/TEMPLATE_PERIODIC_PROFILING.S */
+ /*
+ * Increment profile counter for this trace, and decrement
+ * sample counter. If sample counter goes below zero, turn
+ * off profiling.
+ *
+ * On entry
+ * (lr-11) is address of pointer to counter. Note: the counter
+ * actually exists 10 bytes before the return target, but because
+ * we are arriving from thumb mode, lr will have its low bit set.
+ */
+ ldr r0, [lr,#-11]
+ ldr r1, [rSELF, #offThread_pProfileCountdown]
+ ldr r2, [r0] @ get counter
+ ldr r3, [r1] @ get countdown timer
+ add r2, #1
+ subs r3, #1 @ decrement countdown timer
+ blt .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
+ str r2, [r0]
+ str r3, [r1]
+ bx lr
+
+.LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
+ mov r4, lr @ preserve lr
+ ldr r0, .LdvmJitTraceProfilingOff
+ blx r0
+ bx r4
+
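+ /*
+ * Illustrative sketch (not part of the generated template): what the
+ * handler above does, in C. Names are hypothetical.
+ *
+ *   counter++;                            // per-trace profile count
+ *   countdown--;
+ *   if (countdown < 0)
+ *       dvmJitTraceProfilingOff();        // sample budget exhausted
+ *   else
+ *       writeBack(counter, countdown);    // both values stored back
+ */
+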
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_RETURN_PROF
+dvmCompiler_TEMPLATE_RETURN_PROF:
+/* File: armv5te/TEMPLATE_RETURN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_RETURN.S */
+ /*
+ * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+ * If the stored value in returnAddr
+ * is non-zero, the caller was compiled by the JIT, so return to the
+ * address in the code cache following the invoke instruction. Otherwise
+ * return to the special dvmJitToInterpNoChain entry point.
+ */
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rSELF
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
+ SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
+ ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+ ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+ beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
+ ldr r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+ mov rFP, r10 @ publish new FP
+ ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+ str r2, [rSELF, #offThread_method]@ self->method = newSave->method
+ ldr r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ add rPC, rPC, #6 @ publish new rPC (advance 6 bytes)
+ str r0, [rSELF, #offThread_methodClassDex]
+ cmp r8, #0 @ check the break flags
+ movne r9, #0 @ clear the chaining cell address
+ str r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+ cmp r9, #0 @ chaining cell exists?
+ blxne r9 @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1 @ callsite is interpreted
+1:
+ mov r0, #0
+ str r0, [rSELF, #offThread_inJitCodeCache] @ reset inJitCodeCache
+ stmia rSELF, {rPC, rFP} @ SAVE_PC_FP_TO_SELF()
+ ldr r2, .LdvmMterpStdBail @ defined in footer.S
+ mov r0, rSELF @ Expecting rSELF in r0
+ blx r2 @ exit the interpreter
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+ /*
+ * For polymorphic callsites - set up the Dalvik frame and load the Dalvik PC
+ * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+ * runtime-resolved callee.
+ */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ ldrh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldrh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ ldr r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+ ldr rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns
+
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne lr @ bail to the interpreter
+ tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+ bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
+
+ ldr r10, .LdvmJitToInterpTraceSelectNoChain
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kInlineCacheMiss
+#endif
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+ /*
+ * For a monomorphic callsite, set up the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ */
+ @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+ @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+ @ methodToCall is guaranteed to be non-native
+.LinvokeChainProf:
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ add r12, lr, #2 @ setup the punt-to-interp address
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo r12 @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne r12 @ bail to the interpreter
+
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
+
+ bx lr @ return to the callee-chaining cell
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+ /*
+ * For polymorphic callsite, check whether the cached class pointer matches
+ * the current one. If so, set up the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ *
+ * The predicted chaining cell is declared in ArmLIR.h with the
+ * following layout:
+ *
+ * typedef struct PredictedChainingCell {
+ * u4 branch;
+ * const ClassObject *clazz;
+ * const Method *method;
+ * u4 counter;
+ * } PredictedChainingCell;
+ *
+ * Upon returning to the callsite:
+ * - lr : to branch to the chaining cell
+ * - lr+2: to punt to the interpreter
+ * - lr+4: to fully resolve the callee and possibly rechain.
+ * r3 <- class
+ * r9 <- counter
+ */
+ @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+ ldr r3, [r0, #offObject_clazz] @ r3 <- this->class
+ ldr r8, [r2, #4] @ r8 <- predictedChainCell->clazz
+ ldr r0, [r2, #8] @ r0 <- predictedChainCell->method
+ ldr r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
+ cmp r3, r8 @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+ ldr r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ bne 101f
+ ldr r10, [r7, #0]
+101:
+#else
+ ldreq r10, [r7, #0]
+#endif
+ add r10, r10, #1
+ streq r10, [r7, #0]
+#endif
+ ldreqh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldreqh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ beq .LinvokeChainProf @ predicted chain is valid
+ ldr r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+ cmp r8, #0 @ initialized class or not
+ moveq r1, #0
+ subne r1, r9, #1 @ count--
+ strne r1, [rSELF, #offThread_icRechainCount] @ write back to thread
+ add lr, lr, #4 @ return to fully-resolve landing pad
+ /*
+ * r1 <- count
+ * r2 <- &predictedChainCell
+ * r3 <- this->class
+ * r4 <- dPC
+ * r7 <- this->class->vtable
+ */
+ bx lr
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ @ r7 = methodToCall->registersSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+ bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
+
+ @ go ahead and transfer control to the native code
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ mov r2, #0
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ arg2<- methodToCall
+ mov r0, r1 @ arg0<- newFP
+ add r1, rSELF, #offThread_retval @ arg1<- &retval
+ mov r3, rSELF @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rSELF
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ blx r8 @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+#endif
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1
+
+#undef TEMPLATE_INLINE_PROFILING
+
+ .size dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
+/* File: armv5te/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+.LinvokeNative:
+ @ Prep for the native call
+ @ r1 = newFP, r0 = methodToCall
+ mov r2, #0
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ ldrh lr, [rSELF, #offThread_subMode]
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ r2<- methodToCall
+ mov r0, r1 @ r0<- newFP
+ add r1, rSELF, #offThread_retval @ r1<- &retval
+ mov r3, rSELF @ arg3<- self
+ ands lr, #kSubModeMethodTrace
+ beq 121f @ hop if not profiling
+ @ r2: methodToCall, r6: rSELF
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3}
+
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
+
+ ldmfd sp!, {r0-r1}
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+ b 212f
+121:
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
+212:
+
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ bx r1
+
+/*
+ * On entry:
+ * r0 Faulting Dalvik PC
+ */
+.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
+ mov r2, #0
+ str r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
+ ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
+ ldr rIBASE, .LdvmAsmInstructionStart @ same as above
+ mov rPC, r0 @ reload the faulting Dalvik address
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
+
+ .align 2
+.LdvmAsmInstructionStart:
+ .word dvmAsmInstructionStart
+.LdvmJitToInterpNoChainNoProfile:
+ .word dvmJitToInterpNoChainNoProfile
+.LdvmJitToInterpTraceSelectNoChain:
+ .word dvmJitToInterpTraceSelectNoChain
+.LdvmJitToInterpNoChain:
+ .word dvmJitToInterpNoChain
+.LdvmMterpStdBail:
+ .word dvmMterpStdBail
+.LdvmMterpCommonExceptionThrown:
+ .word dvmMterpCommonExceptionThrown
+.LdvmLockObject:
+ .word dvmLockObject
+.LdvmJitTraceProfilingOff:
+ .word dvmJitTraceProfilingOff
+#if defined(WITH_JIT_TUNING)
+.LdvmICHitCount:
+ .word gDvmICHitCount
+#endif
+#if defined(WITH_SELF_VERIFICATION)
+.LdvmSelfVerificationMemOpDecode:
+ .word dvmSelfVerificationMemOpDecode
+#endif
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastMethodTraceExit:
+ .word dvmFastMethodTraceExit
+.L__aeabi_cdcmple:
+ .word __aeabi_cdcmple
+.L__aeabi_cfcmple:
+ .word __aeabi_cfcmple
+
+ .global dmvCompilerTemplateEnd
+dmvCompilerTemplateEnd:
+
+#endif /* WITH_JIT */
+
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv6j.S b/vm/compiler/template/out/CompilerTemplateAsm-armv6j.S
new file mode 100644
index 000000000..64110314d
--- /dev/null
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv6j.S
@@ -0,0 +1,1712 @@
+/*
+ * This file was generated automatically by gen-template.py for 'armv6j'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: armv5te/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(WITH_JIT)
+
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+JIT and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF thread pointer
+
+The following registers have fixed assignments in mterp but are scratch
+registers in compiled code
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ sub _reg, _fpreg, #sizeofStackSaveArea
+
+#define EXPORT_PC() \
+ str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../../../mterp/common/asm-constants.h"
+
+/* File: armv5te-vfp/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines and utility
+ * ===========================================================================
+ */
+
+
+ .global dvmCompilerTemplateStart
+ .type dvmCompilerTemplateStart, %function
+ .text
+
+dvmCompilerTemplateStart:
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMP_LONG
+dvmCompiler_TEMPLATE_CMP_LONG:
+/* File: armv5te/TEMPLATE_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .LTEMPLATE_CMP_LONG_less @ signed compare on high part
+ bgt .LTEMPLATE_CMP_LONG_greater
+ subs r0, r0, r2 @ r0<- r0 - r2
+ bxeq lr
+ bhi .LTEMPLATE_CMP_LONG_greater @ unsigned compare on low part
+.LTEMPLATE_CMP_LONG_less:
+ mvn r0, #0 @ r0<- -1
+ bx lr
+.LTEMPLATE_CMP_LONG_greater:
+ mov r0, #1 @ r0<- 1
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_RETURN
+dvmCompiler_TEMPLATE_RETURN:
+/* File: armv5te/TEMPLATE_RETURN.S */
+ /*
+ * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+ * If the stored value in returnAddr
+ * is non-zero, the caller is compiled by the JIT thus return to the
+ * address in the code cache following the invoke instruction. Otherwise
+ * return to the special dvmJitToInterpNoChain entry point.
+ */
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rSELF
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
+ SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
+ ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+ ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+ beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
+ ldr r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+ mov rFP, r10 @ publish new FP
+ ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+ str r2, [rSELF, #offThread_method]@ self->method = newSave->method
+ ldr r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ add rPC, rPC, #6 @ publish new rPC (advance 6 bytes)
+ str r0, [rSELF, #offThread_methodClassDex]
+ cmp r8, #0 @ check the break flags
+ movne r9, #0 @ clear the chaining cell address
+ str r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+ cmp r9, #0 @ chaining cell exists?
+ blxne r9 @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1 @ callsite is interpreted
+1:
+ mov r0, #0
+ str r0, [rSELF, #offThread_inJitCodeCache] @ reset inJitCodeCache
+ stmia rSELF, {rPC, rFP} @ SAVE_PC_FP_TO_SELF()
+ ldr r2, .LdvmMterpStdBail @ defined in footer.S
+ mov r0, rSELF @ Expecting rSELF in r0
+ blx r2 @ exit the interpreter
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+ /*
+ * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+ * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+ * runtime-resolved callee.
+ */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ ldrh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldrh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ ldr r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+ ldr rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns
+
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne lr @ bail to the interpreter
+ tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+ bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
+
+ ldr r10, .LdvmJitToInterpTraceSelectNoChain
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kInlineCacheMiss
+#endif
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+ /*
+ * For monomorphic callsite, setup the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ */
+ @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+ @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+ @ methodToCall is guaranteed to be non-native
+.LinvokeChain:
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ add r12, lr, #2 @ setup the punt-to-interp address
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo r12 @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne r12 @ bail to the interpreter
+
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
+
+ bx lr @ return to the callee-chaining cell
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+ /*
+ * For polymorphic callsite, check whether the cached class pointer matches
+ * the current one. If so setup the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ *
+ * The predicted chaining cell is declared in ArmLIR.h with the
+ * following layout:
+ *
+ * typedef struct PredictedChainingCell {
+ * u4 branch;
+ * const ClassObject *clazz;
+ * const Method *method;
+ * u4 counter;
+ * } PredictedChainingCell;
+ *
+ * Upon returning to the callsite:
+ * - lr : to branch to the chaining cell
+ * - lr+2: to punt to the interpreter
+ * - lr+4: to fully resolve the callee and may rechain.
+ * r3 <- class
+ * r9 <- counter
+ */
+ @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+ ldr r3, [r0, #offObject_clazz] @ r3 <- this->class
+ ldr r8, [r2, #4] @ r8 <- predictedChainCell->clazz
+ ldr r0, [r2, #8] @ r0 <- predictedChainCell->method
+ ldr r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
+ cmp r3, r8 @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+ ldr r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ bne 101f
+ ldr r10, [r7, #0]
+101:
+#else
+ ldreq r10, [r7, #0]
+#endif
+ add r10, r10, #1
+ streq r10, [r7, #0]
+#endif
+ ldreqh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldreqh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ beq .LinvokeChain @ predicted chain is valid
+ ldr r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+ cmp r8, #0 @ initialized class or not
+ moveq r1, #0
+ subne r1, r9, #1 @ count--
+ strne r1, [rSELF, #offThread_icRechainCount] @ write back to thread
+ add lr, lr, #4 @ return to fully-resolve landing pad
+ /*
+ * r1 <- count
+ * r2 <- &predictedChainCell
+ * r3 <- this->class
+ * r4 <- dPC
+ * r7 <- this->class->vtable
+ */
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ @ r7 = methodToCall->registersSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+ bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
+
+ @ go ahead and transfer control to the native code
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ mov r2, #0
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ arg2<- methodToCall
+ mov r0, r1 @ arg0<- newFP
+ add r1, rSELF, #offThread_retval @ arg1<- &retval
+ mov r3, rSELF @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rSELF
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ blx r8 @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+#endif
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPG_DOUBLE
+dvmCompiler_TEMPLATE_CMPG_DOUBLE:
+/* File: armv5te/TEMPLATE_CMPG_DOUBLE.S */
+/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
+ /*
+ * For the JIT: incoming arguments in r0-r1, r2-r3
+ * result in r0
+ *
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ push {r0-r3} @ save operands
+ mov r11, lr @ save return address
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
+ bhi .LTEMPLATE_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0, trumps less than
+ add sp, #16 @ drop unused operands
+ bx r11
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LTEMPLATE_CMPG_DOUBLE_gt_or_nan:
+ pop {r2-r3} @ restore operands in reverse order
+ pop {r0-r1} @ restore operands in reverse order
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
+ movcc r0, #1 @ (greater than) r0<- 1
+ bxcc r11
+ mov r0, #1 @ r0<- 1 for NaN
+ bx r11
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPL_DOUBLE
+dvmCompiler_TEMPLATE_CMPL_DOUBLE:
+/* File: armv5te/TEMPLATE_CMPL_DOUBLE.S */
+ /*
+ * For the JIT: incoming arguments in r0-r1, r2-r3
+ * result in r0
+ *
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ push {r0-r3} @ save operands
+ mov r11, lr @ save return address
+ ldr ip, .L__aeabi_cdcmple @ PIC way of "bl __aeabi_cdcmple"
+ blx ip
+ bhi .LTEMPLATE_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0, trumps less than
+ add sp, #16 @ drop unused operands
+ bx r11
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LTEMPLATE_CMPL_DOUBLE_gt_or_nan:
+ pop {r2-r3} @ restore operands in reverse order
+ pop {r0-r1} @ restore operands in reverse order
+ ldr ip, .L__aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ blx ip
+ movcc r0, #1 @ (greater than) r0<- 1
+ bxcc r11
+ mvn r0, #0 @ r0<- -1 for NaN
+ bx r11
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPG_FLOAT
+dvmCompiler_TEMPLATE_CMPG_FLOAT:
+/* File: armv5te/TEMPLATE_CMPG_FLOAT.S */
+/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
+ /*
+ * For the JIT: incoming arguments in r0-r1, r2-r3
+ * result in r0
+ *
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ mov r9, r0 @ Save copies - we may need to redo
+ mov r10, r1
+ mov r11, lr @ save return address
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
+ bhi .LTEMPLATE_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0, trumps less than
+ bx r11
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LTEMPLATE_CMPG_FLOAT_gt_or_nan:
+ mov r0, r10 @ restore in reverse order
+ mov r1, r9
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
+ movcc r0, #1 @ (greater than) r0<- 1
+ bxcc r11
+ mov r0, #1 @ r0<- 1 for NaN
+ bx r11
+
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_CMPL_FLOAT
+dvmCompiler_TEMPLATE_CMPL_FLOAT:
+/* File: armv5te/TEMPLATE_CMPL_FLOAT.S */
+ /*
+ * For the JIT: incoming arguments in r0-r1, r2-r3
+ * result in r0
+ *
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ mov r9, r0 @ Save copies - we may need to redo
+ mov r10, r1
+ mov r11, lr @ save return address
+ ldr ip, .L__aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ blx ip
+ bhi .LTEMPLATE_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0, trumps less than
+ bx r11
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LTEMPLATE_CMPL_FLOAT_gt_or_nan:
+ mov r0, r10 @ restore in reverse order
+ mov r1, r9
+ ldr ip, .L__aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ blx ip
+ movcc r0, #1 @ (greater than) r0<- 1
+ bxcc r11
+ mvn r0, #0 @ r0<- -1 for NaN
+ bx r11
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MUL_LONG
+dvmCompiler_TEMPLATE_MUL_LONG:
+/* File: armv5te/TEMPLATE_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * For JIT: op1 in r0/r1, op2 in r2/r3, return in r0/r1
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ mov r0,r9
+ mov r1,r10
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_SHL_LONG
+dvmCompiler_TEMPLATE_SHL_LONG:
+/* File: armv5te/TEMPLATE_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
+ /* shl-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_SHR_LONG
+dvmCompiler_TEMPLATE_SHR_LONG:
+/* File: armv5te/TEMPLATE_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
+ /* shr-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_USHR_LONG
+dvmCompiler_TEMPLATE_USHR_LONG:
+/* File: armv5te/TEMPLATE_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
+ * 6 bits.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON
+dvmCompiler_TEMPLATE_THROW_EXCEPTION_COMMON:
+/* File: armv5te/TEMPLATE_THROW_EXCEPTION_COMMON.S */
+ /*
+ * Throw an exception from JIT'ed code.
+ * On entry:
+ * r0 Dalvik PC that raises the exception
+ */
+ b .LhandleException
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MEM_OP_DECODE
+dvmCompiler_TEMPLATE_MEM_OP_DECODE:
+/* File: armv5te/TEMPLATE_MEM_OP_DECODE.S */
+#if defined(WITH_SELF_VERIFICATION)
+ /*
+ * This handler encapsulates heap memory ops for selfVerification mode.
+ *
+ * The call to the handler is inserted prior to a heap memory operation.
+ * This handler then calls a function to decode the memory op, and process
+ * it accordingly. Afterwards, the handler changes the return address to
+ * skip the memory op so it never gets executed.
+ */
+ push {r0-r12,lr} @ save out all registers
+ ldr r2, .LdvmSelfVerificationMemOpDecode @ defined in footer.S
+ mov r0, lr @ arg0 <- link register
+ mov r1, sp @ arg1 <- stack pointer
+ blx r2 @ decode and handle the mem op
+ pop {r0-r12,lr} @ restore all registers
+ bx lr @ return to compiled code
+#endif
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_STRING_COMPARETO
+dvmCompiler_TEMPLATE_STRING_COMPARETO:
+/* File: armv5te/TEMPLATE_STRING_COMPARETO.S */
+ /*
+ * String's compareTo.
+ *
+ * Requires r0/r1 to have been previously checked for null. Will
+ * return negative if this string is < comp, 0 if they are the
+ * same and positive if >.
+ *
+ * IMPORTANT NOTE:
+ *
+ * This code relies on hard-coded offsets for string objects, and must be
+ * kept in sync with definitions in UtfString.h. See asm-constants.h
+ *
+ * On entry:
+ * r0: this object pointer
+ * r1: comp object pointer
+ *
+ */
+
+ mov r2, r0 @ this to r2, opening up r0 for return value
+ subs r0, r2, r1 @ Same?
+ bxeq lr
+
+ ldr r4, [r2, #STRING_FIELDOFF_OFFSET]
+ ldr r9, [r1, #STRING_FIELDOFF_OFFSET]
+ ldr r7, [r2, #STRING_FIELDOFF_COUNT]
+ ldr r10, [r1, #STRING_FIELDOFF_COUNT]
+ ldr r2, [r2, #STRING_FIELDOFF_VALUE]
+ ldr r1, [r1, #STRING_FIELDOFF_VALUE]
+
+ /*
+ * At this point, we have:
+ * value: r2/r1
+ * offset: r4/r9
+ * count: r7/r10
+ * We're going to compute
+ * r11 <- countDiff
+ * r10 <- minCount
+ */
+ subs r11, r7, r10
+ movls r10, r7
+
+ /* Now, build pointers to the string data */
+ add r2, r2, r4, lsl #1
+ add r1, r1, r9, lsl #1
+ /*
+ * Note: data pointers point to previous element so we can use pre-index
+ * mode with base writeback.
+ */
+ add r2, #16-2 @ offset to contents[-1]
+ add r1, #16-2 @ offset to contents[-1]
+
+ /*
+ * At this point we have:
+ * r2: *this string data
+ * r1: *comp string data
+ * r10: iteration count for comparison
+ * r11: value to return if the first part of the string is equal
+ * r0: reserved for result
+ * r3, r4, r7, r8, r9, r12 available for loading string data
+ */
+
+ subs r10, #2
+ blt do_remainder2
+
+ /*
+ * Unroll the first two checks so we can quickly catch early mismatch
+ * on long strings (but preserve incoming alignment)
+ */
+
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ bxne lr
+ cmp r10, #28
+ bgt do_memcmp16
+ subs r10, #3
+ blt do_remainder
+
+loopback_triple:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ ldrh r7, [r2, #2]!
+ ldrh r8, [r1, #2]!
+ ldrh r9, [r2, #2]!
+ ldrh r12,[r1, #2]!
+ subs r0, r3, r4
+ subeqs r0, r7, r8
+ subeqs r0, r9, r12
+ bxne lr
+ subs r10, #3
+ bge loopback_triple
+
+do_remainder:
+ adds r10, #3
+ beq returnDiff
+
+loopback_single:
+ ldrh r3, [r2, #2]!
+ ldrh r4, [r1, #2]!
+ subs r0, r3, r4
+ bxne lr
+ subs r10, #1
+ bne loopback_single
+
+returnDiff:
+ mov r0, r11
+ bx lr
+
+do_remainder2:
+ adds r10, #2
+ bne loopback_single
+ mov r0, r11
+ bx lr
+
+ /* Long string case */
+do_memcmp16:
+ mov r4, lr
+ ldr lr, .Lmemcmp16
+ mov r7, r11
+ add r0, r2, #2
+ add r1, r1, #2
+ mov r2, r10
+ blx lr
+ cmp r0, #0
+ bxne r4
+ mov r0, r7
+ bx r4
+
+.Lmemcmp16:
+ .word __memcmp16
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_STRING_INDEXOF
+dvmCompiler_TEMPLATE_STRING_INDEXOF:
+/* File: armv5te/TEMPLATE_STRING_INDEXOF.S */
+ /*
+ * String's indexOf.
+ *
+ * Requires r0 to have been previously checked for null. Will
+ * return index of match of r1 in r0.
+ *
+ * IMPORTANT NOTE:
+ *
+ * This code relies on hard-coded offsets for string objects, and must be
+ * kept in sync with definitions in UtfString.h. See asm-constants.h
+ *
+ * On entry:
+ * r0: string object pointer
+ * r1: char to match
+ * r2: Starting offset in string data
+ */
+
+ ldr r7, [r0, #STRING_FIELDOFF_OFFSET]
+ ldr r8, [r0, #STRING_FIELDOFF_COUNT]
+ ldr r0, [r0, #STRING_FIELDOFF_VALUE]
+
+ /*
+ * At this point, we have:
+ * r0: object pointer
+ * r1: char to match
+ * r2: starting offset
+ * r7: offset
+ * r8: string length
+ */
+
+ /* Build pointer to start of string data */
+ add r0, #16
+ add r0, r0, r7, lsl #1
+
+ /* Save a copy of starting data in r7 */
+ mov r7, r0
+
+ /* Clamp start to [0..count] */
+ cmp r2, #0
+ movlt r2, #0
+ cmp r2, r8
+ movgt r2, r8
+
+ /* Build pointer to start of data to compare and pre-bias */
+ add r0, r0, r2, lsl #1
+ sub r0, #2
+
+ /* Compute iteration count */
+ sub r8, r2
+
+ /*
+ * At this point we have:
+ * r0: start of data to test
+ * r1: char to compare
+ * r8: iteration count
+ * r7: original start of string
+ * r3, r4, r9, r10, r11, r12 available for loading string data
+ */
+
+ subs r8, #4
+ blt indexof_remainder
+
+indexof_loop4:
+ ldrh r3, [r0, #2]!
+ ldrh r4, [r0, #2]!
+ ldrh r10, [r0, #2]!
+ ldrh r11, [r0, #2]!
+ cmp r3, r1
+ beq match_0
+ cmp r4, r1
+ beq match_1
+ cmp r10, r1
+ beq match_2
+ cmp r11, r1
+ beq match_3
+ subs r8, #4
+ bge indexof_loop4
+
+indexof_remainder:
+ adds r8, #4
+ beq indexof_nomatch
+
+indexof_loop1:
+ ldrh r3, [r0, #2]!
+ cmp r3, r1
+ beq match_3
+ subs r8, #1
+ bne indexof_loop1
+
+indexof_nomatch:
+ mov r0, #-1
+ bx lr
+
+match_0:
+ sub r0, #6
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_1:
+ sub r0, #4
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_2:
+ sub r0, #2
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+match_3:
+ sub r0, r7
+ asr r0, r0, #1
+ bx lr
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INTERPRET
+dvmCompiler_TEMPLATE_INTERPRET:
+/* File: armv5te/TEMPLATE_INTERPRET.S */
+ /*
+ * This handler transfers control to the interpreter without performing
+ * any lookups. It may be called either as part of a normal chaining
+ * operation, or from the transition code in header.S. We distinguish
+ * the two cases by looking at the link register. If called from a
+ * translation chain, it will point to the chaining Dalvik PC -3.
+ * On entry:
+ * lr - if NULL:
+ * r1 - the Dalvik PC to begin interpretation.
+ * else
+ * [lr, #3] contains Dalvik PC to begin interpretation
+ * rSELF - pointer to thread
+ * rFP - Dalvik frame pointer
+ */
+ cmp lr, #0
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ beq 101f
+ ldr r1,[lr, #3]
+101:
+#else
+ ldrne r1,[lr, #3]
+#endif
+ ldr r2, .LinterpPunt
+ mov r0, r1 @ set Dalvik PC
+ bx r2
+ @ doesn't return
+
+.LinterpPunt:
+ .word dvmJitToInterpPunt
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MONITOR_ENTER
+dvmCompiler_TEMPLATE_MONITOR_ENTER:
+/* File: armv5te/TEMPLATE_MONITOR_ENTER.S */
+ /*
+ * Call out to the runtime to lock an object. Because this thread
+ * may have been suspended in THREAD_MONITOR state and the Jit's
+ * translation cache subsequently cleared, we cannot return directly.
+ * Instead, unconditionally transition to the interpreter to resume.
+ *
+ * On entry:
+ * r0 - self pointer
+ * r1 - the object (which has already been null-checked by the caller)
+ * r4 - the Dalvik PC of the following instruction.
+ */
+ ldr r2, .LdvmLockObject
+ mov r3, #0 @ Record that we're not returning
+ str r3, [r0, #offThread_inJitCodeCache]
+ blx r2 @ dvmLockObject(self, obj)
+ ldr r2, .LdvmJitToInterpNoChain
+ @ Bail to interpreter - no chain [note - r4 still contains rPC]
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kHeavyweightMonitor
+#endif
+ bx r2
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG
+dvmCompiler_TEMPLATE_MONITOR_ENTER_DEBUG:
+/* File: armv5te/TEMPLATE_MONITOR_ENTER_DEBUG.S */
+ /*
+ * To support deadlock prediction, this version of MONITOR_ENTER
+ * will always call the heavyweight dvmLockObject, check for an
+ * exception and then bail out to the interpreter.
+ *
+ * On entry:
+ * r0 - self pointer
+ * r1 - the object (which has already been null-checked by the caller)
+ * r4 - the Dalvik PC of the following instruction.
+ *
+ */
+ ldr r2, .LdvmLockObject
+ mov r3, #0 @ Record that we're not returning
+ str r3, [r0, #offThread_inJitCodeCache]
+ blx r2 @ dvmLockObject(self, obj)
+ @ test for exception
+ ldr r1, [rSELF, #offThread_exception]
+ cmp r1, #0
+ beq 1f
+ ldr r2, .LhandleException
+ sub r0, r4, #2 @ roll dPC back to this monitor instruction
+ bx r2
+1:
+ @ Bail to interpreter - no chain [note - r4 still contains rPC]
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kHeavyweightMonitor
+#endif
+ ldr pc, .LdvmJitToInterpNoChain
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_PERIODIC_PROFILING
+dvmCompiler_TEMPLATE_PERIODIC_PROFILING:
+/* File: armv5te/TEMPLATE_PERIODIC_PROFILING.S */
+ /*
+ * Increment profile counter for this trace, and decrement
+ * sample counter. If sample counter goes below zero, turn
+ * off profiling.
+ *
+ * On entry
+ * (lr-11) is address of pointer to counter. Note: the counter
+ * actually exists 10 bytes before the return target, but because
+ * we are arriving from thumb mode, lr will have its low bit set.
+ */
+ ldr r0, [lr,#-11]
+ ldr r1, [rSELF, #offThread_pProfileCountdown]
+ ldr r2, [r0] @ get counter
+ ldr r3, [r1] @ get countdown timer
+ add r2, #1
+ subs r2, #1
+ blt .LTEMPLATE_PERIODIC_PROFILING_disable_profiling
+ str r2, [r0]
+ str r3, [r1]
+ bx lr
+
+.LTEMPLATE_PERIODIC_PROFILING_disable_profiling:
+ mov r4, lr @ preserve lr
+ ldr r0, .LdvmJitTraceProfilingOff
+ blx r0
+ bx r4
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_RETURN_PROF
+dvmCompiler_TEMPLATE_RETURN_PROF:
+/* File: armv5te/TEMPLATE_RETURN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_RETURN.S */
+ /*
+ * Unwind a frame from the Dalvik stack for compiled OP_RETURN_XXX.
+ * If the stored value in returnAddr
+ * is non-zero, the caller is compiled by the JIT thus return to the
+ * address in the code cache following the invoke instruction. Otherwise
+ * return to the special dvmJitToInterpNoChain entry point.
+ */
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve live registers
+ mov r0, r6
+ @ r0=rSELF
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore live registers
+#endif
+ SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
+ ldr r10, [r0, #offStackSaveArea_prevFrame] @ r10<- saveArea->prevFrame
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ ldr rPC, [r0, #offStackSaveArea_savedPc] @ rPC<- saveArea->savedPc
+#if !defined(WITH_SELF_VERIFICATION)
+ ldr r9, [r0, #offStackSaveArea_returnAddr] @ r9<- chaining cell ret
+#else
+ mov r9, #0 @ disable chaining
+#endif
+ ldr r2, [r10, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ break frame?
+#if !defined(WITH_SELF_VERIFICATION)
+ beq 1f @ bail to interpreter
+#else
+ blxeq lr @ punt to interpreter and compare state
+#endif
+ ldr r1, .LdvmJitToInterpNoChainNoProfile @ defined in footer.S
+ mov rFP, r10 @ publish new FP
+ ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+
+ str r2, [rSELF, #offThread_method]@ self->method = newSave->method
+ ldr r0, [r10, #offClassObject_pDvmDex] @ r0<- method->clazz->pDvmDex
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ add rPC, rPC, #6 @ publish new rPC (advance 6 bytes)
+ str r0, [rSELF, #offThread_methodClassDex]
+ cmp r8, #0 @ check the break flags
+ movne r9, #0 @ clear the chaining cell address
+ str r9, [rSELF, #offThread_inJitCodeCache] @ in code cache or not
+ cmp r9, #0 @ chaining cell exists?
+ blxne r9 @ jump to the chaining cell
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1 @ callsite is interpreted
+1:
+ mov r0, #0
+ str r0, [rSELF, #offThread_inJitCodeCache] @ reset inJitCodeCache
+ stmia rSELF, {rPC, rFP} @ SAVE_PC_FP_TO_SELF()
+ ldr r2, .LdvmMterpStdBail @ defined in footer.S
+ mov r0, rSELF @ Expecting rSELF in r0
+ blx r2 @ exit the interpreter
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NO_OPT.S */
+ /*
+ * For polymorphic callsites - setup the Dalvik frame and load Dalvik PC
+ * into rPC then jump to dvmJitToInterpNoChain to dispatch the
+ * runtime-resolved callee.
+ */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ ldrh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldrh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ ldr r10, [r0, #offMethod_accessFlags] @ r10<- methodToCall->accessFlags
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+ ldr rPC, [r0, #offMethod_insns] @ rPC<- methodToCall->insns
+
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne lr @ bail to the interpreter
+ tst r10, #ACC_NATIVE
+#if !defined(WITH_SELF_VERIFICATION)
+ bne .LinvokeNative
+#else
+ bxne lr @ bail to the interpreter
+#endif
+
+ ldr r10, .LdvmJitToInterpTraceSelectNoChain
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ @ Start executing the callee
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kInlineCacheMiss
+#endif
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_CHAIN.S */
+ /*
+ * For monomorphic callsite, setup the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ */
+ @ r0 = methodToCall, r1 = returnCell, r2 = methodToCall->outsSize
+ @ rPC = dalvikCallsite, r7 = methodToCall->registersSize
+ @ methodToCall is guaranteed to be non-native
+.LinvokeChainProf:
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ add r12, lr, #2 @ setup the punt-to-interp address
+ sub r10, r10, r2, lsl #2 @ r10<- bottom (newsave - outsSize)
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo r12 @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ ldr r9, [r0, #offMethod_clazz] @ r9<- method->clazz
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ bxne r12 @ bail to the interpreter
+
+ ldr r3, [r9, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+
+ @ Update "thread" values for the new method
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov rFP, r1 @ fp = newFp
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = newFp
+#if defined(TEMPLATE_INLINE_PROFILING)
+ stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
+ mov r1, r6
+ @ r0=methodToCall, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r2,lr} @ restore registers
+#endif
+
+ bx lr @ return to the callee-chaining cell
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN.S */
+ /*
+ * For polymorphic callsite, check whether the cached class pointer matches
+ * the current one. If so setup the Dalvik frame and return to the
+ * Thumb code through the link register to transfer control to the callee
+ * method through a dedicated chaining cell.
+ *
+ * The predicted chaining cell is declared in ArmLIR.h with the
+ * following layout:
+ *
+ * typedef struct PredictedChainingCell {
+ * u4 branch;
+ * const ClassObject *clazz;
+ * const Method *method;
+ * u4 counter;
+ * } PredictedChainingCell;
+ *
+ * Upon returning to the callsite:
+ * - lr : to branch to the chaining cell
+ * - lr+2: to punt to the interpreter
+ * - lr+4: to fully resolve the callee and may rechain.
+ * r3 <- class
+ * r9 <- counter
+ */
+ @ r0 = this, r1 = returnCell, r2 = predictedChainCell, rPC = dalvikCallsite
+ ldr r3, [r0, #offObject_clazz] @ r3 <- this->class
+ ldr r8, [r2, #4] @ r8 <- predictedChainCell->clazz
+ ldr r0, [r2, #8] @ r0 <- predictedChainCell->method
+ ldr r9, [rSELF, #offThread_icRechainCount] @ r9 <- shared rechainCount
+ cmp r3, r8 @ predicted class == actual class?
+#if defined(WITH_JIT_TUNING)
+ ldr r7, .LdvmICHitCount
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ bne 101f
+ ldr r10, [r7, #0]
+101:
+#else
+ ldreq r10, [r7, #0]
+#endif
+ add r10, r10, #1
+ streq r10, [r7, #0]
+#endif
+ ldreqh r7, [r0, #offMethod_registersSize] @ r7<- methodToCall->regsSize
+ ldreqh r2, [r0, #offMethod_outsSize] @ r2<- methodToCall->outsSize
+ beq .LinvokeChainProf @ predicted chain is valid
+ ldr r7, [r3, #offClassObject_vtable] @ r7 <- this->class->vtable
+ cmp r8, #0 @ initialized class or not
+ moveq r1, #0
+ subne r1, r9, #1 @ count--
+ strne r1, [rSELF, #offThread_icRechainCount] @ write back to thread
+ add lr, lr, #4 @ return to fully-resolve landing pad
+ /*
+ * r1 <- count
+ * r2 <- &predictedChainCell
+ * r3 <- this->class
+ * r4 <- dPC
+ * r7 <- this->class->vtable
+ */
+ bx lr
+
+#undef TEMPLATE_INLINE_PROFILING
+
+/* ------------------------------ */
+ .balign 4
+ .global dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF
+dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE_PROF.S */
+#define TEMPLATE_INLINE_PROFILING
+/* File: armv5te/TEMPLATE_INVOKE_METHOD_NATIVE.S */
+ @ r0 = methodToCall, r1 = returnCell, rPC = dalvikCallsite
+ @ r7 = methodToCall->registersSize
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ ldrb r8, [rSELF, #offThread_breakFlags] @ r8<- breakFlags
+ add r3, r1, #1 @ Thumb addr is odd
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r7, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- stack save area
+ cmp r10, r9 @ bottom < interpStackEnd?
+ bxlo lr @ return to raise stack overflow excep.
+ @ r1 = newFP, r0 = methodToCall, r3 = returnCell, rPC = dalvikCallsite
+ str rPC, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+ str rPC, [r1, #(offStackSaveArea_savedPc - sizeofStackSaveArea)]
+
+ @ set up newSaveArea
+ str rFP, [r1, #(offStackSaveArea_prevFrame - sizeofStackSaveArea)]
+ str r3, [r1, #(offStackSaveArea_returnAddr - sizeofStackSaveArea)]
+ str r0, [r1, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ cmp r8, #0 @ breakFlags != 0
+ ldr r8, [r0, #offMethod_nativeFunc] @ r8<- method->nativeFunc
+#if !defined(WITH_SELF_VERIFICATION)
+ bxne lr @ bail to the interpreter
+#else
+ bx lr @ bail to interpreter unconditionally
+#endif
+
+ @ go ahead and transfer control to the native code
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ mov r2, #0
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in the jit code cache
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ arg2<- methodToCall
+ mov r0, r1 @ arg0<- newFP
+ add r1, rSELF, #offThread_retval @ arg1<- &retval
+ mov r3, rSELF @ arg3<- self
+#if defined(TEMPLATE_INLINE_PROFILING)
+ @ r2=methodToCall, r6=rSELF
+ stmfd sp!, {r2,r6} @ to be consumed after JNI return
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ mov r0, r2
+ mov r1, r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+#endif
+
+ blx r8 @ off to the native code
+
+#if defined(TEMPLATE_INLINE_PROFILING)
+ ldmfd sp!, {r0-r1} @ restore r2 and r6
+ @ r0=JNIMethod, r1=rSELF
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+#endif
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [rFP, #(offStackSaveArea_currentPc - sizeofStackSaveArea)]
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the mode properly
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ mov pc, r1
+
+#undef TEMPLATE_INLINE_PROFILING
+
+ .size dvmCompilerTemplateStart, .-dvmCompilerTemplateStart
+/* File: armv5te/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+.LinvokeNative:
+ @ Prep for the native call
+ @ r1 = newFP, r0 = methodToCall
+ mov r2, #0
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ str r2, [rSELF, #offThread_inJitCodeCache] @ not in jit code cache
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r9, [r1, #(offStackSaveArea_localRefCookie - sizeofStackSaveArea)]
+ @ newFp->localRefCookie=top
+ ldrh lr, [rSELF, #offThread_subMode]
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- new stack save area
+
+ mov r2, r0 @ r2<- methodToCall
+ mov r0, r1 @ r0<- newFP
+ add r1, rSELF, #offThread_retval @ r1<- &retval
+ mov r3, rSELF @ arg3<- self
+ ands lr, #kSubModeMethodTrace
+ beq 121f @ hop if not profiling
+ @ r2: methodToCall, r6: rSELF
+ stmfd sp!, {r2,r6}
+ stmfd sp!, {r0-r3}
+ mov r0, r2
+ mov r1, r6
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
+ ldmfd sp!, {r0-r3}
+
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
+
+ ldmfd sp!, {r0-r1}
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
+ b 212f
+121:
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
+212:
+
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r2, [r10, #offStackSaveArea_returnAddr] @ r2 = chaining cell ret
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved->top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ ldr r0, [r10, #offStackSaveArea_savedPc] @ reload rPC
+
+ @ r0 = dalvikCallsitePC
+ bne .LhandleException @ no, handle exception
+
+ str r2, [rSELF, #offThread_inJitCodeCache] @ set the new mode
+ cmp r2, #0 @ return chaining cell still exists?
+ bxne r2 @ yes - go ahead
+
+ @ continue executing the next instruction through the interpreter
+ ldr r1, .LdvmJitToInterpTraceSelectNoChain @ defined in footer.S
+ add rPC, r0, #6 @ reconstruct new rPC (advance 6 bytes)
+#if defined(WITH_JIT_TUNING)
+ mov r0, #kCallsiteInterpreted
+#endif
+ bx r1
+
+/*
+ * On entry:
+ * r0 Faulting Dalvik PC
+ */
+.LhandleException:
+#if defined(WITH_SELF_VERIFICATION)
+ ldr pc, .LdeadFood @ should not see this under self-verification mode
+.LdeadFood:
+ .word 0xdeadf00d
+#endif
+ mov r2, #0
+ str r2, [rSELF, #offThread_inJitCodeCache] @ in interpreter land
+ ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
+ ldr rIBASE, .LdvmAsmInstructionStart @ same as above
+ mov rPC, r0 @ reload the faulting Dalvik address
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
+
+ .align 2
+.LdvmAsmInstructionStart:
+ .word dvmAsmInstructionStart
+.LdvmJitToInterpNoChainNoProfile:
+ .word dvmJitToInterpNoChainNoProfile
+.LdvmJitToInterpTraceSelectNoChain:
+ .word dvmJitToInterpTraceSelectNoChain
+.LdvmJitToInterpNoChain:
+ .word dvmJitToInterpNoChain
+.LdvmMterpStdBail:
+ .word dvmMterpStdBail
+.LdvmMterpCommonExceptionThrown:
+ .word dvmMterpCommonExceptionThrown
+.LdvmLockObject:
+ .word dvmLockObject
+.LdvmJitTraceProfilingOff:
+ .word dvmJitTraceProfilingOff
+#if defined(WITH_JIT_TUNING)
+.LdvmICHitCount:
+ .word gDvmICHitCount
+#endif
+#if defined(WITH_SELF_VERIFICATION)
+.LdvmSelfVerificationMemOpDecode:
+ .word dvmSelfVerificationMemOpDecode
+#endif
+.LdvmFastMethodTraceEnter:
+ .word dvmFastMethodTraceEnter
+.LdvmFastNativeMethodTraceExit:
+ .word dvmFastNativeMethodTraceExit
+.LdvmFastMethodTraceExit:
+ .word dvmFastMethodTraceExit
+.L__aeabi_cdcmple:
+ .word __aeabi_cdcmple
+.L__aeabi_cfcmple:
+ .word __aeabi_cfcmple
+
+ .global dmvCompilerTemplateEnd
+dmvCompilerTemplateEnd:
+
+#endif /* WITH_JIT */
+
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
index ba798e06d..23f281228 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a-neon.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1623,7 +1623,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1676,8 +1676,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1807,8 +1807,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1817,8 +1817,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1936,7 +1936,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
index 825ac408a..360ebfa8b 100644
--- a/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
+++ b/vm/compiler/template/out/CompilerTemplateAsm-armv7-a.S
@@ -170,8 +170,8 @@ dvmCompiler_TEMPLATE_RETURN:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -272,8 +272,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -281,7 +281,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
/* ------------------------------ */
.balign 4
@@ -330,8 +330,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -453,8 +453,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -463,8 +463,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1508,8 +1508,8 @@ dvmCompiler_TEMPLATE_RETURN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve live registers
mov r0, r6
@ r0=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceExit
+ ldr ip, .LdvmFastMethodTraceExit
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore live registers
#endif
SAVEAREA_FROM_FP(r0, rFP) @ r0<- saveArea (old)
@@ -1614,8 +1614,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
stmfd sp!, {r0-r3} @ preserve r0-r3
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1623,7 +1623,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NO_OPT_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kInlineCacheMiss
#endif
- mov pc, r10 @ dvmJitToInterpTraceSelectNoChain
+ bx r10 @ dvmJitToInterpTraceSelectNoChain
#undef TEMPLATE_INLINE_PROFILING
@@ -1676,8 +1676,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_CHAIN_PROF:
stmfd sp!, {r0-r2,lr} @ preserve clobbered live registers
mov r1, r6
@ r0=methodToCall, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r2,lr} @ restore registers
#endif
@@ -1807,8 +1807,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
mov r0, r2
mov r1, r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3} @ restore r0-r3
#endif
@@ -1817,8 +1817,8 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(TEMPLATE_INLINE_PROFILING)
ldmfd sp!, {r0-r1} @ restore r2 and r6
@ r0=JNIMethod, r1=rSELF
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
#endif
@ native return; r10=newSaveArea
@ equivalent to dvmPopJniLocals
@@ -1880,20 +1880,20 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
stmfd sp!, {r0-r3}
mov r0, r2
mov r1, r6
- mov lr, pc
- ldr pc, .LdvmFastMethodTraceEnter
+ ldr ip, .LdvmFastMethodTraceEnter
+ blx ip
ldmfd sp!, {r0-r3}
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
ldmfd sp!, {r0-r1}
- mov lr, pc
- ldr pc, .LdvmFastNativeMethodTraceExit
+ ldr ip, .LdvmFastNativeMethodTraceExit
+ blx ip
b 212f
121:
- mov lr, pc
- ldr pc, [r2, #offMethod_nativeFunc]
+ ldr ip, [r2, #offMethod_nativeFunc]
+ blx ip
212:
@ native return; r10=newSaveArea
@@ -1919,7 +1919,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
#if defined(WITH_JIT_TUNING)
mov r0, #kCallsiteInterpreted
#endif
- mov pc, r1
+ bx r1
/*
* On entry:
@@ -1936,7 +1936,7 @@ dvmCompiler_TEMPLATE_INVOKE_METHOD_NATIVE_PROF:
ldr r1, .LdvmMterpCommonExceptionThrown @ PIC way of getting &func
ldr rIBASE, .LdvmAsmInstructionStart @ same as above
mov rPC, r0 @ reload the faulting Dalvik address
- mov pc, r1 @ branch to dvmMterpCommonExceptionThrown
+ bx r1 @ branch to dvmMterpCommonExceptionThrown
.align 2
.LdvmAsmInstructionStart:
diff --git a/vm/compiler/template/rebuild.sh b/vm/compiler/template/rebuild.sh
index f04d097e1..aba4749dd 100755
--- a/vm/compiler/template/rebuild.sh
+++ b/vm/compiler/template/rebuild.sh
@@ -19,4 +19,4 @@
# generated as part of the build.
#
set -e
-for arch in ia32 armv5te armv5te-vfp armv7-a armv7-a-neon; do TARGET_ARCH_EXT=$arch make -f Makefile-template; done
+for arch in ia32 armv5te armv5te-vfp armv6j armv6-vfp armv7-a armv7-a-neon; do TARGET_ARCH_EXT=$arch make -f Makefile-template; done
diff --git a/vm/dalvik b/vm/dalvik
index 5569db917..cb46775f0 100644
--- a/vm/dalvik
+++ b/vm/dalvik
@@ -20,7 +20,7 @@ ANDROID_LOG_TAGS="" \
ANDROID_DATA=/tmp/android-data \
ANDROID_ROOT=$ANDROID_BUILD_TOP/out/host/linux-x86 \
LD_LIBRARY_PATH=$ANDROID_BUILD_TOP/out/host/linux-x86/lib \
-$ANDROID_BUILD_TOP/out/host/linux-x86/bin/dalvikvm \
+exec $ANDROID_BUILD_TOP/out/host/linux-x86/bin/dalvikvm \
-Xbootclasspath\
:$ANDROID_BUILD_TOP/out/host/linux-x86/framework/core-hostdex.jar\
:$ANDROID_BUILD_TOP/out/host/linux-x86/framework/bouncycastle-hostdex.jar\
diff --git a/vm/hprof/HprofClass.cpp b/vm/hprof/HprofClass.cpp
index 023efdbb4..b5462440a 100644
--- a/vm/hprof/HprofClass.cpp
+++ b/vm/hprof/HprofClass.cpp
@@ -89,6 +89,10 @@ hprof_class_object_id hprofLookupClassId(const ClassObject *clazz)
val = dvmHashTableLookup(gClassHashTable, computeClassHash(clazz),
(void *)clazz, classCmp, true);
assert(val != NULL);
+#ifdef NDEBUG
+ // variable defined but not used warning breaks -Werror
+ (void)val;
+#endif
dvmHashTableUnlock(gClassHashTable);
diff --git a/vm/interp/Interp.cpp b/vm/interp/Interp.cpp
index 41097644f..c57ea672a 100644
--- a/vm/interp/Interp.cpp
+++ b/vm/interp/Interp.cpp
@@ -766,7 +766,9 @@ static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
if (pCtrl->active && pCtrl->thread == self) {
int frameDepth;
bool doStop = false;
+#ifndef LOG_NDEBUG
const char* msg = NULL;
+#endif
assert(!dvmIsNativeMethod(method));
@@ -778,14 +780,20 @@ static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
*/
if (pCtrl->method != method) {
doStop = true;
+#ifndef LOG_NDEBUG
msg = "new method";
+#endif
} else if (pCtrl->size == SS_MIN) {
doStop = true;
+#ifndef LOG_NDEBUG
msg = "new instruction";
+#endif
} else if (!dvmAddressSetGet(
pCtrl->pAddressSet, pc - method->insns)) {
doStop = true;
+#ifndef LOG_NDEBUG
msg = "new line";
+#endif
}
} else if (pCtrl->depth == SD_OVER) {
/*
@@ -799,16 +807,22 @@ static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
if (frameDepth < pCtrl->frameDepth) {
/* popped up one or more frames, always trigger */
doStop = true;
+#ifndef LOG_NDEBUG
msg = "method pop";
+#endif
} else if (frameDepth == pCtrl->frameDepth) {
/* same depth, see if we moved */
if (pCtrl->size == SS_MIN) {
doStop = true;
+#ifndef LOG_NDEBUG
msg = "new instruction";
+#endif
} else if (!dvmAddressSetGet(pCtrl->pAddressSet,
pc - method->insns)) {
doStop = true;
+#ifndef LOG_NDEBUG
msg = "new line";
+#endif
}
}
} else {
@@ -824,7 +838,9 @@ static void updateDebugger(const Method* method, const u2* pc, const u4* fp,
frameDepth = dvmComputeVagueFrameDepth(self, fp);
if (frameDepth < pCtrl->frameDepth) {
doStop = true;
+#ifndef LOG_NDEBUG
msg = "method pop";
+#endif
}
}
@@ -1457,7 +1473,7 @@ void dvmThrowVerificationError(const Method* method, int kind, int ref)
case VERIFY_ERROR_NONE:
/* should never happen; use default exception */
assert(false);
- msg = strdup("weird - no error specified");
+ msg = "weird - no error specified";
break;
/* no default clause -- want warning if enum updated */
diff --git a/vm/jdwp/JdwpAdb.cpp b/vm/jdwp/JdwpAdb.cpp
index 87db1d24b..5bdfd9413 100644
--- a/vm/jdwp/JdwpAdb.cpp
+++ b/vm/jdwp/JdwpAdb.cpp
@@ -430,7 +430,7 @@ static bool handlePacket(JdwpState* state)
JdwpReqHeader hdr;
u4 length, id;
u1 flags, cmdSet, cmd;
- u2 error;
+// u2 error;
bool reply;
int dataLen;
@@ -441,7 +441,7 @@ static bool handlePacket(JdwpState* state)
flags = read1(&buf);
if ((flags & kJDWPFlagReply) != 0) {
reply = true;
- error = read2BE(&buf);
+ /*error =*/ read2BE(&buf);
} else {
reply = false;
cmdSet = read1(&buf);
diff --git a/vm/jdwp/JdwpHandler.cpp b/vm/jdwp/JdwpHandler.cpp
index b41139a4c..d51afc7d4 100644
--- a/vm/jdwp/JdwpHandler.cpp
+++ b/vm/jdwp/JdwpHandler.cpp
@@ -964,9 +964,12 @@ static JdwpError handleOR_EnableCollection(JdwpState* state,
static JdwpError handleOR_IsCollected(JdwpState* state,
const u1* buf, int dataLen, ExpandBuf* pReply)
{
+#ifndef LOG_NDEBUG
ObjectId objectId;
- objectId = dvmReadObjectId(&buf);
+ objectId =
+#endif
+ dvmReadObjectId(&buf);
ALOGV(" Req IsCollected(0x%llx)", objectId);
// TODO: currently returning false; must integrate with GC
@@ -1172,9 +1175,9 @@ static JdwpError handleTR_FrameCount(JdwpState* state,
static JdwpError handleTR_CurrentContendedMonitor(JdwpState* state,
const u1* buf, int dataLen, ExpandBuf* pReply)
{
- ObjectId threadId;
+ //ObjectId threadId;
- threadId = dvmReadObjectId(&buf);
+ /*threadId =*/ dvmReadObjectId(&buf);
// TODO: create an Object to represent the monitor (we're currently
// just using a raw Monitor struct in the VM)
@@ -1557,8 +1560,11 @@ static JdwpError handleER_Set(JdwpState* state,
static JdwpError handleER_Clear(JdwpState* state,
const u1* buf, int dataLen, ExpandBuf* pReply)
{
+#ifndef LOG_NDEBUG
u1 eventKind;
- eventKind = read1(&buf);
+ eventKind =
+#endif
+ read1(&buf);
u4 requestId = read4BE(&buf);
ALOGV(" Req to clear eventKind=%d requestId=%#x", eventKind, requestId);
diff --git a/vm/jdwp/JdwpSocket.cpp b/vm/jdwp/JdwpSocket.cpp
index ad0a287ca..babb4a9da 100644
--- a/vm/jdwp/JdwpSocket.cpp
+++ b/vm/jdwp/JdwpSocket.cpp
@@ -586,7 +586,7 @@ static bool handlePacket(JdwpState* state)
JdwpReqHeader hdr;
u4 length, id;
u1 flags, cmdSet, cmd;
- u2 error;
+ //u2 error;
bool reply;
int dataLen;
@@ -599,7 +599,7 @@ static bool handlePacket(JdwpState* state)
flags = read1(&buf);
if ((flags & kJDWPFlagReply) != 0) {
reply = true;
- error = read2BE(&buf);
+ /*error =*/ read2BE(&buf);
} else {
reply = false;
cmdSet = read1(&buf);
diff --git a/vm/mterp/armv5te/footer.S b/vm/mterp/armv5te/footer.S
index d96e005fa..e7e253a68 100644
--- a/vm/mterp/armv5te/footer.S
+++ b/vm/mterp/armv5te/footer.S
@@ -655,8 +655,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -682,8 +682,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/common/jit-config.h b/vm/mterp/common/jit-config.h
index 8cc32e3ca..fdebd8f10 100644
--- a/vm/mterp/common/jit-config.h
+++ b/vm/mterp/common/jit-config.h
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#if __ARM_ARCH_5TE__
+#if __ARM_ARCH_5TE__ || __ARM_ARCH_6__
#define JIT_PROF_SIZE_LOG_2 9
#else
#define JIT_PROF_SIZE_LOG_2 11
diff --git a/vm/mterp/config-armv6-vfp b/vm/mterp/config-armv6-vfp
new file mode 100644
index 000000000..d47b9e50d
--- /dev/null
+++ b/vm/mterp/config-armv6-vfp
@@ -0,0 +1,108 @@
+# Copyright (C) 2009 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARMv6 targets with VFP support.
+#
+# This is just ARMv5TE with replacements for the handlers that can benefit
+# from floating-point instructions. Essentially all float/double
+# operations except for "remainder" and conversions to/from 64-bit ints.
+#
+
+handler-style computed-goto
+handler-size 64
+
+# source for the instruction table stub
+asm-stub armv5te/stub.S
+
+# source for alternate entry stub
+asm-alt-stub armv5te/alt_stub.S
+
+# file header and basic definitions
+import c/header.cpp
+import armv5te/header.S
+
+# C pre-processor defines for stub C instructions
+import cstubs/stubdefs.cpp
+
+# highly-platform-specific defs
+import armv5te/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+import c/opcommon.cpp
+
+# arch-specific entry point to interpreter
+import armv5te/entry.S
+
+# opcode list; argument to op-start is default directory
+op-start armv5te
+ op OP_ADD_DOUBLE arm-vfp
+ op OP_ADD_DOUBLE_2ADDR arm-vfp
+ op OP_ADD_FLOAT arm-vfp
+ op OP_ADD_FLOAT_2ADDR arm-vfp
+ op OP_CMPG_DOUBLE arm-vfp
+ op OP_CMPG_FLOAT arm-vfp
+ op OP_CMPL_DOUBLE arm-vfp
+ op OP_CMPL_FLOAT arm-vfp
+ op OP_DIV_DOUBLE arm-vfp
+ op OP_DIV_DOUBLE_2ADDR arm-vfp
+ op OP_DIV_FLOAT arm-vfp
+ op OP_DIV_FLOAT_2ADDR arm-vfp
+ op OP_DOUBLE_TO_FLOAT arm-vfp
+ op OP_DOUBLE_TO_INT arm-vfp
+ op OP_FLOAT_TO_DOUBLE arm-vfp
+ op OP_FLOAT_TO_INT arm-vfp
+ op OP_INT_TO_DOUBLE arm-vfp
+ op OP_INT_TO_FLOAT arm-vfp
+ op OP_MUL_DOUBLE arm-vfp
+ op OP_MUL_DOUBLE_2ADDR arm-vfp
+ op OP_MUL_FLOAT arm-vfp
+ op OP_MUL_FLOAT_2ADDR arm-vfp
+ op OP_SUB_DOUBLE arm-vfp
+ op OP_SUB_DOUBLE_2ADDR arm-vfp
+ op OP_SUB_FLOAT arm-vfp
+ op OP_SUB_FLOAT_2ADDR arm-vfp
+
+ # use trivial integer operation
+ #op OP_NEG_DOUBLE armv5te
+ #op OP_NEG_FLOAT armv5te
+
+ # use __aeabi_* functions
+ #op OP_DOUBLE_TO_LONG armv5te
+ #op OP_FLOAT_TO_LONG armv5te
+ #op OP_LONG_TO_DOUBLE armv5te
+ #op OP_LONG_TO_FLOAT armv5te
+
+ # no "remainder" op in vfp or libgcc.a; use libc function
+ #op OP_REM_DOUBLE armv5te
+ #op OP_REM_DOUBLE_2ADDR armv5te
+ #op OP_REM_FLOAT armv5te
+ #op OP_REM_FLOAT_2ADDR armv5te
+
+ # experiment, unrelated to vfp
+ #op OP_INT_TO_BYTE armv6
+ #op OP_INT_TO_CHAR armv6
+ #op OP_INT_TO_SHORT armv6
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+##import c/gotoTargets.cpp
+
+# end of defs; include this when cstubs/stubdefs.cpp is included
+import cstubs/enddefs.cpp
+
+# common subroutines for asm
+import armv5te/footer.S
+import armv5te/debug.cpp
diff --git a/vm/mterp/config-armv6j b/vm/mterp/config-armv6j
new file mode 100644
index 000000000..621578022
--- /dev/null
+++ b/vm/mterp/config-armv6j
@@ -0,0 +1,78 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for ARMv6J architecture targets.
+#
+
+handler-style computed-goto
+handler-size 64
+
+# source for the instruction table stub
+asm-stub armv5te/stub.S
+
+# source for alternate entry stub
+asm-alt-stub armv5te/alt_stub.S
+
+# file header and basic definitions
+import c/header.cpp
+import armv5te/header.S
+
+# C pre-processor defines for stub C instructions
+import cstubs/stubdefs.cpp
+
+# highly-platform-specific defs
+import armv5te/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+import c/opcommon.cpp
+
+# arch-specific entry point to interpreter
+import armv5te/entry.S
+
+# opcode list; argument to op-start is default directory
+op-start armv5te
+ #op OP_FILL_ARRAY_DATA c
+
+ # use trivial integer operation
+ op OP_NEG_DOUBLE armv5te
+ op OP_NEG_FLOAT armv5te
+
+ # use __aeabi_* functions
+ op OP_DOUBLE_TO_LONG armv5te
+ op OP_FLOAT_TO_LONG armv5te
+ op OP_LONG_TO_DOUBLE armv5te
+ op OP_LONG_TO_FLOAT armv5te
+
+ # no "remainder" op in vfp or libgcc.a; use libc function
+ op OP_REM_DOUBLE armv5te
+ op OP_REM_DOUBLE_2ADDR armv5te
+ op OP_REM_FLOAT armv5te
+ op OP_REM_FLOAT_2ADDR armv5te
+ op OP_INT_TO_BYTE armv6
+ op OP_INT_TO_CHAR armv6
+ op OP_INT_TO_SHORT armv6
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+##import c/gotoTargets.cpp
+
+# end of defs; include this when cstubs/stubdefs.cpp is included
+import cstubs/enddefs.cpp
+
+# common subroutines for asm
+import armv5te/footer.S
+import armv5te/debug.cpp
+
diff --git a/vm/mterp/out/InterpAsm-armv5te-vfp.S b/vm/mterp/out/InterpAsm-armv5te-vfp.S
index 84b47a210..cb928c3a5 100644
--- a/vm/mterp/out/InterpAsm-armv5te-vfp.S
+++ b/vm/mterp/out/InterpAsm-armv5te-vfp.S
@@ -16298,8 +16298,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -16325,8 +16325,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv5te.S b/vm/mterp/out/InterpAsm-armv5te.S
index 89c81337d..ab4c8d4ef 100644
--- a/vm/mterp/out/InterpAsm-armv5te.S
+++ b/vm/mterp/out/InterpAsm-armv5te.S
@@ -16756,8 +16756,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -16783,8 +16783,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv6-vfp.S b/vm/mterp/out/InterpAsm-armv6-vfp.S
new file mode 100644
index 000000000..ef93571df
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-armv6-vfp.S
@@ -0,0 +1,16866 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv6-vfp'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: armv5te/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them. If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF self (Thread) pointer
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF() ldr rPC, [rSELF, #offThread_pc]
+#define SAVE_PC_TO_SELF() str rPC, [rSELF, #offThread_pc]
+#define LOAD_FP_FROM_SELF() ldr rFP, [rSELF, #offThread_curFrame]
+#define SAVE_FP_TO_SELF() str rFP, [rSELF, #offThread_curFrame]
+#define LOAD_PC_FP_FROM_SELF() ldmia rSELF, {rPC, rFP}
+#define SAVE_PC_FP_TO_SELF() stmia rSELF, {rPC, rFP}
+
+/*
+ * "export" the PC to the stack frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
+ * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
+ *
+ * It's okay to do this more than once.
+ */
+#define EXPORT_PC() \
+ str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ sub _reg, _fpreg, #sizeofStackSaveArea
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() ldrh rINST, [rPC]
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #((_count)*2)]!
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+ ldrh _dreg, [_sreg, #((_count)*2)]!
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(_reg, _count) ldrh _reg, [rPC, #((_count)*2)]
+#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #((_count)*2)]
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #((_count)*2+(_byte))]
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(_reg) and _reg, rINST, #255
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+#define GET_PREFETCHED_OPCODE(_oreg, _ireg) and _oreg, _ireg, #255
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6
+#define GOTO_OPCODE_BASE(_base,_reg) add pc, _base, _reg, lsl #6
+#define GOTO_OPCODE_IFEQ(_reg) addeq pc, rIBASE, _reg, lsl #6
+#define GOTO_OPCODE_IFNE(_reg) addne pc, rIBASE, _reg, lsl #6
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2]
+#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
+
+/*
+ * Convert a virtual register index into an address.
+ */
+#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
+ add _reg, rFP, _vreg, lsl #2
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../common/asm-constants.h"
+
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
+
+/* File: armv5te/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines
+ * ===========================================================================
+ */
+
+/*
+ * Macro for data memory barrier; not meaningful pre-ARMv6K.
+ */
+.macro SMP_DMB
+.endm
+
+/*
+ * Macro for data memory barrier; not meaningful pre-ARMv6K.
+ */
+.macro SMP_DMB_ST
+.endm
+
+/* File: armv5te/entry.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+/*
+ * We don't have formal stack frames, so gdb scans upward in the code
+ * to find the start of the function (a label with the %function type),
+ * and then looks at the next few instructions to figure out what
+ * got pushed onto the stack. From this it figures out how to restore
+ * the registers, including PC, for the previous stack frame. If gdb
+ * sees a non-function label, it stops scanning, so either we need to
+ * have nothing but assembler-local labels between the entry point and
+ * the break, or we need to fake it out.
+ *
+ * When this is defined, we add some stuff to make gdb less confused.
+ */
+#define ASSIST_DEBUGGER 1
+
+ .text
+ .align 2
+ .global dvmMterpStdRun
+ .type dvmMterpStdRun, %function
+
+/*
+ * On entry:
+ * r0 Thread* self
+ *
+ * The return comes via a call to dvmMterpStdBail().
+ */
+dvmMterpStdRun:
+#define MTERP_ENTRY1 \
+ .save {r4-r10,fp,lr}; \
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+#define MTERP_ENTRY2 \
+ .pad #4; \
+ sub sp, sp, #4 @ align 64
+
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+
+ /* save stack pointer, add magic word for debuggerd */
+ str sp, [r0, #offThread_bailPtr] @ save SP for eventual return
+
+ /* set up "named" registers, figure out entry point */
+ mov rSELF, r0 @ set rSELF
+ LOAD_PC_FP_FROM_SELF() @ load rPC and rFP from "thread"
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ set rIBASE
+
+#if defined(WITH_JIT)
+.LentryInstr:
+ /* Entry is always a possible trace start */
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ FETCH_INST()
+ mov r1, #0 @ prepare the value for the new state
+ str r1, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
+ cmp r0,#0 @ is profiling disabled?
+#if !defined(WITH_SELF_VERIFICATION)
+ bne common_updateProfile @ profiling is enabled
+#else
+ ldr r2, [rSELF, #offThread_shadowSpace] @ to find out the jit exit state
+ beq 1f @ profiling is disabled
+ ldr r3, [r2, #offShadowSpace_jitExitState] @ jit exit state
+ cmp r3, #kSVSTraceSelect @ hot trace following?
+ moveq r2,#kJitTSelectRequestHot @ ask for trace selection
+ beq common_selectTrace @ go build the trace
+ cmp r3, #kSVSNoProfile @ don't profile the next instruction?
+ beq 1f @ intrepret the next instruction
+ b common_updateProfile @ collect profiles
+#endif
+1:
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#else
+ /* start executing the instruction at rPC */
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+.Lbad_arg:
+ ldr r0, strBadEntryPoint
+ @ r1 holds value of entryPoint
+ bl printf
+ bl dvmAbort
+ .fnend
+ .size dvmMterpStdRun, .-dvmMterpStdRun
+
+
+ .global dvmMterpStdBail
+ .type dvmMterpStdBail, %function
+
+/*
+ * Restore the stack pointer and PC from the save point established on entry.
+ * This is essentially the same as a longjmp, but should be cheaper. The
+ * last instruction causes us to return to whoever called dvmMterpStdRun.
+ *
+ * We pushed some registers on the stack in dvmMterpStdRun, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * r0 Thread* self
+ */
+dvmMterpStdBail:
+ ldr sp, [r0, #offThread_bailPtr] @ sp<- saved SP
+ add sp, sp, #4 @ un-align 64
+ ldmfd sp!, {r4-r10,fp,pc} @ restore 9 regs and return
+
+
+/*
+ * String references.
+ */
+strBadEntryPoint:
+ .word .LstrBadEntryPoint
+
+
+ .global dvmAsmInstructionStart
+ .type dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+ .text
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOP: /* 0x00 */
+/* File: armv5te/OP_NOP.S */
+ FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ .type dalvik_inst, %function
+dalvik_inst:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+ .fnend
+#endif
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE: /* 0x01 */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/OP_MOVE_WIDE.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r2, r2, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 1) @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/OP_MOVE_WIDE_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 2) @ r3<- BBBB
+ FETCH(r2, 1) @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/OP_MOVE_OBJECT.S */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/OP_MOVE_OBJECT_16.S */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rSELF, #offThread_retval] @ r0<- self->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rSELF, #offThread_retval @ r3<- &self->retval
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rSELF, #offThread_retval] @ r0<- self->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/OP_MOVE_EXCEPTION.S */
+ /* move-exception vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rSELF, #offThread_exception] @ r3<- dvmGetException bypass
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ SET_VREG(r3, r2) @ fp[AA]<- exception obj
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [rSELF, #offThread_exception] @ dvmClearException bypass
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/OP_RETURN_VOID.S */
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN: /* 0x0f */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rSELF, #offThread_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/OP_RETURN_WIDE.S */
+ /*
+ * Return a 64-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ */
+ /* return-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r3, rSELF, #offThread_retval @ r3<- &self->retval
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ stmia r3, {r0-r1} @ retval<- r0/r1
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/OP_RETURN_OBJECT.S */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rSELF, #offThread_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_4: /* 0x12 */
+/* File: armv5te/OP_CONST_4.S */
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ mov r0, rINST, lsr #8 @ r0<- A+
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r1, r0) @ fp[A]<- r1
+ GOTO_OPCODE(ip) @ execute next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_16: /* 0x13 */
+/* File: armv5te/OP_CONST_16.S */
+ /* const/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST: /* 0x14 */
+/* File: armv5te/OP_CONST.S */
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/OP_CONST_HIGH16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/OP_CONST_WIDE_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/OP_CONST_WIDE_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH(r0, 1) @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S(r2, 2) @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/OP_CONST_WIDE.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (low middle)
+ FETCH(r2, 3) @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH(r3, 4) @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/OP_CONST_STRING.S */
+ /* const/string vAA, String@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- self->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_STRING_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/OP_CONST_STRING_JUMBO.S */
+ /* const/string vAA, String@BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- self->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB]
+ cmp r0, #0
+ beq .LOP_CONST_STRING_JUMBO_resolve
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/OP_CONST_CLASS.S */
+ /* const/class vAA, Class@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- self->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_CLASS_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/OP_MONITOR_ENTER.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ mov r0, rSELF @ r0<- self
+ cmp r1, #0 @ null object?
+ EXPORT_PC() @ need for precise GC
+ beq common_errNullObject @ null object, throw an exception
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl dvmLockObject @ call(self, obj)
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/OP_MONITOR_EXIT.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ EXPORT_PC() @ before fetch: export the PC
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ cmp r1, #0 @ null object?
+ beq 1f @ yes
+ mov r0, rSELF @ r0<- self
+ bl dvmUnlockObject @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST
+ beq common_exceptionThrown @ yes, exception is pending
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+1:
+ FETCH_ADVANCE_INST(1) @ advance before throw
+ b common_errNullObject
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/OP_CHECK_CAST.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r2, 1) @ r2<- BBBB
+ GET_VREG(r9, r3) @ r9<- object
+ ldr r0, [rSELF, #offThread_methodClassDex] @ r0<- pDvmDex
+ cmp r9, #0 @ is object null?
+ ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses
+ beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds
+ ldr r1, [r0, r2, lsl #2] @ r1<- resolved class
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_CHECK_CAST_resolve @ not resolved, do it now
+.LOP_CHECK_CAST_resolved:
+ cmp r0, r1 @ same class (trivial success)?
+ bne .LOP_CHECK_CAST_fullcheck @ no, do full check
+.LOP_CHECK_CAST_okay:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/OP_INSTANCE_OF.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ and r9, r9, #15 @ r9<- A
+ cmp r0, #0 @ is object null?
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- pDvmDex
+ beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0
+ FETCH(r3, 1) @ r3<- CCCC
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses
+ ldr r1, [r2, r3, lsl #2] @ r1<- resolved class
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now
+.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
+ cmp r0, r1 @ same class (trivial success)?
+ beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish
+ b .LOP_INSTANCE_OF_fullcheck @ no, do full check
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/OP_ARRAY_LENGTH.S */
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ mov r2, rINST, lsr #8 @ r2<- A+
+ GET_VREG(r0, r1) @ r0<- vB (object ref)
+ and r2, r2, #15 @ r2<- A
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r3, [r0, #offArrayObject_length] @ r3<- array length
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r3, r2) @ vB<- length
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/OP_NEW_INSTANCE.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+#if defined(WITH_JIT)
+ add r10, r3, r1, lsl #2 @ r10<- &resolved_class
+#endif
+ EXPORT_PC() @ req'd for init, resolve, alloc
+ cmp r0, #0 @ already resolved?
+ beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now
+.LOP_NEW_INSTANCE_resolved: @ r0=class
+ ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum
+ cmp r1, #CLASS_INITIALIZED @ has class been initialized?
+ bne .LOP_NEW_INSTANCE_needinit @ no, init class now
+.LOP_NEW_INSTANCE_initialized: @ r0=class
+ mov r1, #ALLOC_DONT_TRACK @ flags for alloc call
+ bl dvmAllocObject @ r0<- new object
+ b .LOP_NEW_INSTANCE_finish @ continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/OP_NEW_ARRAY.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ FETCH(r2, 1) @ r2<- CCCC
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ GET_VREG(r1, r0) @ r1<- vB (array length)
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ cmp r1, #0 @ check length
+ ldr r0, [r3, r2, lsl #2] @ r0<- resolved class
+ bmi common_errNegativeArraySize @ negative length, bail - len in r1
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ req'd for resolve, alloc
+ bne .LOP_NEW_ARRAY_finish @ resolved, continue
+ b .LOP_NEW_ARRAY_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on
+8: ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on
+8: ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/OP_FILL_ARRAY_DATA.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG(r0, r3) @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ EXPORT_PC();
+ bl dvmInterpHandleFillArrayData@ fill the array with predefined data
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq common_exceptionThrown @ has exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_THROW: /* 0x27 */
+/* File: armv5te/OP_THROW.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (exception object)
+ EXPORT_PC() @ exception handler can throw
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ @ bypass dvmSetException, just store it
+ str r1, [rSELF, #offThread_exception] @ thread->exception<- obj
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO: /* 0x28 */
+/* File: armv5te/OP_GOTO.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset, set flags
+ @ If backwards branch refresh rIBASE
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ bmi common_testUpdateProfile @ (r0) check for trace hotness
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/OP_GOTO_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended)
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ bmi common_testUpdateProfile @ (r0) hot trace head?
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/OP_GOTO_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH(r0, 1) @ r0<- aaaa (lo)
+ FETCH(r1, 2) @ r1<- AAAA (hi)
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ ble common_testUpdateProfile @ (r0) hot trace head?
+#else
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
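+ /*
+ * Rough C-style sketch of the goto/32 logic above (illustrative only;
+ * the names are descriptive and not taken from the VM sources):
+ *
+ *   s4 off = (s4) AAAAaaaa;       // signed 32-bit code-unit offset
+ *   int byteOff = off * 2;        // code units are 16 bits wide
+ *   if (byteOff <= 0)             // backward branch, or branch-to-self
+ *       refresh rIBASE / consider a JIT trace head;
+ *   pc += byteOff;
+ */
+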
+/* ------------------------------ */
+ .balign 64
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * When the JIT is present, all targets are treated as potential
+ * trace heads, regardless of branch direction.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ cmp r0, #0
+ bne common_updateProfile
+#else
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
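+ /*
+ * Rough sketch of what the switch helper is expected to return for
+ * packed-switch (illustrative only, assuming the usual payload layout of
+ * a first_key plus a table of code-unit offsets):
+ *
+ *   idx = vAA - first_key;
+ *   if (0 <= idx && idx < size) return targets[idx];  // branch offset
+ *   else                        return 3;             // fall through (3 code units)
+ */
+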
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/OP_SPARSE_SWITCH.S */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * When the JIT is present, all targets are treated as potential
+ * trace heads, regardless of branch direction.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ cmp r0, #0
+ bne common_updateProfile
+#else
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: arm-vfp/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ b .LOP_CMPL_FLOAT_finish @ argh
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: arm-vfp/OP_CMPG_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ flds s0, [r2] @ s0<- vBB
+ flds s1, [r3] @ s1<- vCC
+ fcmpes s0, s1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ b .LOP_CMPG_FLOAT_finish @ argh
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: arm-vfp/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x > y) {
+ * return 1;
+ * } else if (x < y) {
+ * return -1;
+ * } else {
+ * return -1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ mvn r0, #0 @ r0<- -1 (default)
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fmstat @ export status flags
+ movgt r0, #1 @ (greater than) r0<- 1
+ moveq r0, #0 @ (equal) r0<- 0
+ b .LOP_CMPL_DOUBLE_finish @ argh
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: arm-vfp/OP_CMPG_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * int compare(x, y) {
+ * if (x == y) {
+ * return 0;
+ * } else if (x < y) {
+ * return -1;
+ * } else if (x > y) {
+ * return 1;
+ * } else {
+ * return 1;
+ * }
+ * }
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ fldd d0, [r2] @ d0<- vBB
+ fldd d1, [r3] @ d1<- vCC
+ fcmped d0, d1 @ compare (vBB, vCC)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ mov r0, #1 @ r0<- 1 (default)
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fmstat @ export status flags
+ mvnmi r0, #0 @ (less than) r0<- -1
+ moveq r0, #0 @ (equal) r0<- 0
+ b .LOP_CMPG_DOUBLE_finish @ argh
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/OP_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .LOP_CMP_LONG_less @ signed compare on high part
+ bgt .LOP_CMP_LONG_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .LOP_CMP_LONG_greater @ unsigned compare on low part
+ bne .LOP_CMP_LONG_less
+ b .LOP_CMP_LONG_finish @ equal; r1 already holds 0
+
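+ /*
+ * Rough C-style sketch of the multi-compare approach used above
+ * (illustrative only; hi/lo denote the high and low 32-bit halves):
+ *
+ *   if (hiBB < hiCC)      result = -1;   // signed compare on high words
+ *   else if (hiBB > hiCC) result = 1;
+ *   else if (loBB > loCC) result = 1;    // unsigned compare on low words
+ *   else if (loBB < loCC) result = -1;
+ *   else                  result = 0;
+ *   vAA = result;
+ */
+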
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/OP_IF_EQ.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movne r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
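+ /*
+ * Rough sketch of the not-taken trick shared by the bincmp handlers
+ * (illustrative only): the reverse condition overwrites the fetched
+ * offset with 2, i.e. "just advance to the next instruction".
+ *
+ *   delta = (vA == vB) ? CCCC : 2;   // if-eq shown; revcmp is "ne"
+ *   pc += delta * 2;                 // code units -> bytes
+ *   if (delta < 0) refresh rIBASE / consider a JIT trace head;
+ */
+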
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NE: /* 0x33 */
+/* File: armv5te/OP_IF_NE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ moveq r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LT: /* 0x34 */
+/* File: armv5te/OP_IF_LT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movge r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GE: /* 0x35 */
+/* File: armv5te/OP_IF_GE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movlt r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GT: /* 0x36 */
+/* File: armv5te/OP_IF_GT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movle r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LE: /* 0x37 */
+/* File: armv5te/OP_IF_LE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movgt r1, #2 @ r1<- BYTE branch dist for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/OP_IF_EQZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movne r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/OP_IF_NEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/OP_IF_LTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/OP_IF_GEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/OP_IF_GTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/OP_IF_LEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/OP_UNUSED_3E.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/OP_UNUSED_3F.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/OP_UNUSED_40.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/OP_UNUSED_41.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/OP_UNUSED_42.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/OP_UNUSED_43.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET: /* 0x44 */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
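+ /*
+ * Rough C-style sketch of the checks above (illustrative only): the single
+ * unsigned compare also rejects negative indices, which wrap to very large
+ * unsigned values.
+ *
+ *   if (array == NULL)                    throw NullPointerException;
+ *   if ((u4) index >= (u4) array->length) throw ArrayIndexOutOfBounds;
+ *   vAA = ((u4*) array->contents)[index];
+ */
+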
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/OP_AGET_WIDE.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcc .LOP_AGET_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/OP_AGET_OBJECT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/OP_AGET_BOOLEAN.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/OP_AGET_BYTE.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/OP_AGET_CHAR.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/OP_AGET_SHORT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT: /* 0x4b */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/OP_APUT_WIDE.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcc .LOP_APUT_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/OP_APUT_OBJECT.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(rINST, r2) @ rINST<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp rINST, #0 @ null array object?
+ GET_VREG(r9, r9) @ r9<- vAA
+ beq common_errNullObject @ yes, bail
+ ldr r3, [rINST, #offArrayObject_length] @ r3<- arrayObj->length
+ add r10, rINST, r1, lsl #2 @ r10<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on
+ b common_errArrayIndex @ index >= length, bail
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/OP_APUT_BOOLEAN.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/OP_APUT_BYTE.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/OP_APUT_CHAR.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/OP_APUT_SHORT.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET: /* 0x52 */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_finish
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/OP_IGET_WIDE.S */
+ /*
+ * 64-bit instance field get.
+ */
+ /* iget-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_WIDE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_WIDE_finish
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/OP_IGET_OBJECT.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_OBJECT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_OBJECT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/OP_IGET_BOOLEAN.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BOOLEAN_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/OP_IGET_BYTE.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BYTE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BYTE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/OP_IGET_CHAR.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_CHAR_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_CHAR_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/OP_IGET_SHORT.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_SHORT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_SHORT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT: /* 0x59 */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/OP_IPUT_WIDE.S */
+ /* iput-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_WIDE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_WIDE_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/OP_IPUT_OBJECT.S */
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_OBJECT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_OBJECT_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/OP_IPUT_BOOLEAN.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/OP_IPUT_BYTE.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BYTE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BYTE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/OP_IPUT_CHAR.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_CHAR_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_CHAR_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/OP_IPUT_SHORT.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_SHORT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_SHORT_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET: /* 0x60 */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_resolve @ yes, do resolve
+.LOP_SGET_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
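+ /*
+ * Rough sketch of the resolve-then-retry pattern shared by the SGET
+ * handlers (illustrative only; the actual resolve stub is emitted later):
+ *
+ *   field = pDvmDex->pResFields[BBBB];
+ *   if (field == NULL) {
+ *       field = resolve(curMethod->clazz, BBBB);   // may throw
+ *       if (field == NULL) goto exceptionThrown;
+ *   }
+ *   vAA = field->value;                            // 32-bit copy
+ */
+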
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ /* sget-wide vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_WIDE_resolve @ yes, do resolve
+.LOP_SGET_WIDE_finish:
+ mov r9, rINST, lsr #8 @ r9<- AA
+ .if 0
+ add r0, r0, #offStaticField_value @ r0<- pointer to data
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+ .endif
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/OP_SGET_OBJECT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_OBJECT_resolve @ yes, do resolve
+.LOP_SGET_OBJECT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/OP_SGET_BOOLEAN.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve
+.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/OP_SGET_BYTE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BYTE_resolve @ yes, do resolve
+.LOP_SGET_BYTE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/OP_SGET_CHAR.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_CHAR_resolve @ yes, do resolve
+.LOP_SGET_CHAR_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/OP_SGET_SHORT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_SHORT_resolve @ yes, do resolve
+.LOP_SGET_SHORT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT: /* 0x67 */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_resolve @ yes, do resolve
+.LOP_SPUT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ /* sput-wide vAA, field@BBBB */
+ ldr r0, [rSELF, #offThread_methodClassDex] @ r0<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r0, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r10, r1, lsl #2] @ r2<- resolved StaticField ptr
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r2, #0 @ is resolved entry null?
+ beq .LOP_SPUT_WIDE_resolve @ yes, do resolve
+.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 0
+ add r2, r2, #offStaticField_value @ r2<- pointer to data
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
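In the OP_SGET_WIDE and OP_SPUT_WIDE handlers above, the .if 0 / .else pair is the template's volatile switch: a volatile static long/double would go through the quasi-atomic 64-bit helpers (dvmQuasiAtomicRead64 / dvmQuasiAtomicSwap64Sync), while the non-volatile variant assembled here uses plain ldrd/strd. A simplified sketch of the store side; the atomic helper below is a placeholder, not the VM's real implementation:

    #include <stdint.h>
    #include <string.h>

    /* Placeholder for the quasi-atomic 64-bit store; the real helper must make
     * the store indivisible on cores without single-copy-atomic 64-bit stores. */
    static void atomic_store64(volatile int64_t* addr, int64_t value)
    {
        *addr = value;
    }

    static void sput_wide(int64_t* fieldValue, const uint32_t* fp, uint8_t vAA,
                          int isVolatile)
    {
        int64_t v;
        memcpy(&v, &fp[vAA], sizeof v);          /* ldmia r9, {r0-r1}: vAA/vAA+1 */
        if (isVolatile)
            atomic_store64(fieldValue, v);       /* the .if-guarded swap64 path */
        else
            *fieldValue = v;                     /* strd r0, [r2, #offStaticField_value] */
    }
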
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/OP_SPUT_OBJECT.S */
+ /*
+ * 32-bit SPUT handler for objects
+ *
+ * for: sput-object, sput-object-volatile
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve
+.LOP_SPUT_OBJECT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ ldr r9, [r0, #offField_clazz] @ r9<- field->clazz
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ b .LOP_SPUT_OBJECT_end
+
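OP_SPUT_OBJECT stores a reference, so unlike the scalar sput handlers it also loads the thread's card-table base and the field's declaring class before branching to .LOP_SPUT_OBJECT_end (outside this hunk), where the store is followed by a card mark so a concurrent GC will rescan the dirtied class. A generic card-marking sketch; the shift width and dirty value are illustrative assumptions rather than the VM's exact constants:

    #include <stdint.h>
    #include <stddef.h>

    #define CARD_SHIFT 7            /* assumed 128-byte cards */
    #define CARD_DIRTY 0x70         /* illustrative dirty marker */

    /* biasedCardBase corresponds to the value loaded from offThread_cardTable. */
    static void mark_card(uint8_t* biasedCardBase, const void* addr)
    {
        biasedCardBase[(uintptr_t)addr >> CARD_SHIFT] = CARD_DIRTY;
    }

    static void sput_object(void** fieldSlot, void* ref,
                            uint8_t* biasedCardBase, const void* fieldClazz)
    {
        *fieldSlot = ref;                        /* the releasing store */
        if (ref != NULL)                         /* null stores need no card mark */
            mark_card(biasedCardBase, fieldClazz);
    }
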
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/OP_SPUT_BOOLEAN.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve
+.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/OP_SPUT_BYTE.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BYTE_resolve @ yes, do resolve
+.LOP_SPUT_BYTE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/OP_SPUT_CHAR.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_CHAR_resolve @ yes, do resolve
+.LOP_SPUT_CHAR_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/OP_SPUT_SHORT.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_SHORT_resolve @ yes, do resolve
+.LOP_SPUT_SHORT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
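The invoke-virtual entry above only resolves the base method: it probes the pResMethods cache with BBBB and calls dvmResolveMethod (METHOD_VIRTUAL) on a miss; the .LOP_INVOKE_VIRTUAL_continue code that actually selects the target from the receiver's vtable is outside this hunk. A sketch of the resolution step with stand-in types:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct MethodStub MethodStub;

    /* resolve: stand-in for dvmResolveMethod(method->clazz, ref, METHOD_VIRTUAL). */
    static MethodStub* resolve_base_method(MethodStub** pResMethods, uint16_t ref,
                                           MethodStub* (*resolve)(uint16_t))
    {
        MethodStub* base = pResMethods[ref];     /* ldr r0, [r3, r1, lsl #2] */
        if (base == NULL) {
            base = resolve(ref);                 /* may throw */
            if (base == NULL)
                return NULL;                     /* -> common_exceptionThrown */
        }
        return base;                             /* -> .LOP_INVOKE_VIRTUAL_continue */
    }
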
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r9, #0 @ null "this"?
+ ldr r10, [rSELF, #offThread_method] @ r10<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r10, [r10, #offMethod_clazz] @ r10<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_finish:
+ cmp r9, #0 @ null "this" ref?
+ bne common_invokeMethodNoRange @ r0=method, r9="this"
+ b common_errNullObject @ yes, throw exception
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ mov r9, #0 @ null "this" in delay slot
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+#if defined(WITH_JIT)
+ add r10, r3, r1, lsl #2 @ r10<- &resolved_methodToCall
+#endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodNoRange @ yes, continue on
+ b .LOP_INVOKE_STATIC_resolve
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r2) @ r9<- first arg ("this")
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- methodClassDex
+ cmp r9, #0 @ null obj?
+ ldr r2, [rSELF, #offThread_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r9, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodNoRange @ (r0=method, r9="this")
+
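invoke-interface cannot be served by a simple cache probe on BBBB alone, so after the null check the handler above hands the receiver's class, the method reference, the calling method and the DEX to dvmFindInterfaceMethodInCache, then either throws or falls through to the common invoke path. A sketch of that control flow; the lookup is passed in as a callback because only its use, not its body, is visible here:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct ClassStub ClassStub;
    typedef struct MethodStub MethodStub;
    typedef struct { ClassStub* clazz; } ObjectStub;

    typedef MethodStub* (*InterfaceLookup)(ClassStub* clazz, uint32_t ref,
                                           const MethodStub* caller, void* pDvmDex);

    static MethodStub* invoke_interface_target(ObjectStub* thisPtr, uint32_t ref,
                                               const MethodStub* caller, void* pDvmDex,
                                               InterfaceLookup find, int* errNull)
    {
        if (thisPtr == NULL) {                   /* -> common_errNullObject */
            *errNull = 1;
            return NULL;
        }
        MethodStub* m = find(thisPtr->clazz, ref, caller, pDvmDex);
        if (m == NULL)                           /* -> common_exceptionThrown */
            return NULL;
        return m;                                /* -> common_invokeMethodNoRange */
    }
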
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/OP_UNUSED_73.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r9, #0 @ null "this"?
+ ldr r10, [rSELF, #offThread_method] @ r10<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r10, [r10, #offMethod_clazz] @ r10<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_RANGE_finish:
+ cmp r9, #0 @ null "this" ref?
+ bne common_invokeMethodRange @ r0=method, r9="this"
+ b common_errNullObject @ yes, throw exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ mov r9, #0 @ null "this" in delay slot
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+#if defined(WITH_JIT)
+ add r10, r3, r1, lsl #2 @ r10<- &resolved_methodToCall
+#endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodRange @ yes, continue on
+ b .LOP_INVOKE_STATIC_RANGE_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r2) @ r9<- first arg ("this")
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- methodClassDex
+ cmp r9, #0 @ null obj?
+ ldr r2, [rSELF, #offThread_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r9, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodRange @ (r0=method, r9="this")
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/OP_UNUSED_79.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/OP_UNUSED_7A.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/OP_NEG_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsb r0, r0, #0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
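OP_NEG_INT above and the other armv5te/unop.S expansions that follow (not-int, neg-float, the int width conversions) share all of the vA/vB decode and fetch/advance scaffolding; each opcode substitutes only the one- or two-instruction "instr" line (rsb for neg-int, mvn for not-int, and so on). The template is, in C terms, just:

    #include <stdint.h>

    /* unop.S: vA <- op(vB); 'op' is the per-opcode substitution. */
    static void unop32(uint32_t* fp, uint16_t inst, int32_t (*op)(int32_t))
    {
        uint32_t vA = (inst >> 8) & 0x0f;        /* A from bits 11:8 */
        uint32_t vB = inst >> 12;                /* B from bits 15:12 */
        fp[vA] = (uint32_t)op((int32_t)fp[vB]);
    }

    static int32_t neg_int(int32_t x) { return -x; }   /* rsb r0, r0, #0 */
    static int32_t not_int(int32_t x) { return ~x; }   /* mvn r0, r0 */
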
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/OP_NOT_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/OP_NEG_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsbs r0, r0, #0 @ optional op; may set condition codes
+ rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/OP_NOT_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ optional op; may set condition codes
+ mvn r1, r1 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/OP_NEG_FLOAT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/OP_NEG_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/OP_INT_TO_LONG.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: arm-vfp/OP_INT_TO_FLOAT.S */
+/* File: arm-vfp/funop.S */
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fsitos s1, s0 @ s1<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: arm-vfp/OP_INT_TO_DOUBLE.S */
+/* File: arm-vfp/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fsitod d0, s0 @ d0<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/OP_LONG_TO_INT.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/OP_LONG_TO_FLOAT.S */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/OP_LONG_TO_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: arm-vfp/OP_FLOAT_TO_INT.S */
+/* File: arm-vfp/funop.S */
+ /*
+ * Generic 32-bit unary floating-point operation. Provide an "instr"
+ * line that specifies an instruction that performs "s1 = op s0".
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ ftosizs s1, s0 @ s1<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ fsts s1, [r9] @ vA<- s1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/OP_FLOAT_TO_LONG.S */
+@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl f2l_doconv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+
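The commented-out include line above shows that float-to-long would nominally be bl __aeabi_f2lz, but the assembled code calls the f2l_doconv helper (defined in the shared footer, outside this hunk) so the conversion follows Java semantics: NaN becomes 0 and out-of-range values saturate to Long.MIN_VALUE / Long.MAX_VALUE. Those semantics, as a sketch:

    #include <stdint.h>
    #include <math.h>

    /* Java float -> long (JLS 5.1.3): NaN -> 0, out-of-range saturates. */
    static int64_t f2l_java(float f)
    {
        if (isnan(f))
            return 0;
        if (f >= 9223372036854775808.0f)         /* >= 2^63 */
            return INT64_MAX;
        if (f <= -9223372036854775808.0f)        /* <= -2^63 */
            return INT64_MIN;
        return (int64_t)f;                       /* in range: truncate toward zero */
    }
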
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: arm-vfp/OP_FLOAT_TO_DOUBLE.S */
+/* File: arm-vfp/funopWider.S */
+ /*
+ * Generic 32bit-to-64bit floating point unary operation. Provide an
+ * "instr" line that specifies an instruction that performs "d0 = op s0".
+ *
+ * For: int-to-double, float-to-double
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ flds s0, [r3] @ s0<- vB
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fcvtds d0, s0 @ d0<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ fstd d0, [r9] @ vA<- d0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: arm-vfp/OP_DOUBLE_TO_INT.S */
+/* File: arm-vfp/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ ftosizd s0, d0 @ s0<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/OP_DOUBLE_TO_LONG.S */
+@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl d2l_doconv @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: arm-vfp/OP_DOUBLE_TO_FLOAT.S */
+/* File: arm-vfp/funopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary floating point operation. Provide an
+ * "instr" line that specifies an instruction that performs "s0 = op d0".
+ *
+ * For: double-to-int, double-to-float
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ fldd d0, [r3] @ d0<- vB
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ and r9, r9, #15 @ r9<- A
+ fcvtsd s0, d0 @ s0<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ fsts s0, [r9] @ vA<- s0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/OP_INT_TO_BYTE.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #24 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, asr #24 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
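int-to-byte above and the int-to-char / int-to-short handlers that follow use a two-shift "instr" pair: shift the value left so only the low 8 or 16 bits remain, then shift back down arithmetically (sign-extending, for byte and short) or logically (zero-extending, for char). Equivalently, in C:

    #include <stdint.h>

    static int32_t int_to_byte(int32_t x)  { return (int8_t)x;   }  /* asl #24 ; asr #24 */
    static int32_t int_to_short(int32_t x) { return (int16_t)x;  }  /* asl #16 ; asr #16 */
    static int32_t int_to_char(int32_t x)  { return (uint16_t)x; }  /* asl #16 ; lsr #16 */
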
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/OP_INT_TO_CHAR.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #16 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/OP_INT_TO_SHORT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ mov r0, r0, asl #16 @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r0, r0, asr #16 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/OP_ADD_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/OP_SUB_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/OP_MUL_INT.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/OP_DIV_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
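div-int here (and rem-int just below) are the only binop.S expansions in this stretch assembled with chkzero=1, so their .if 1 guard is live: vCC is tested and a zero divisor branches to common_errDivideByZero before the EABI helper runs. Per the template comment, INT_MIN / -1 needs no extra check because the ARM runtime helper already returns the wrapped result Java expects. A sketch (the INT_MIN case is written out explicitly only because the C expression would otherwise be undefined):

    #include <stdint.h>
    #include <stdbool.h>

    /* chkzero=1 variant of binop.S: divide-by-zero is the only explicit guard. */
    static bool div_int(uint32_t* fp, uint8_t vAA, uint8_t vBB, uint8_t vCC)
    {
        int32_t b = (int32_t)fp[vBB];
        int32_t c = (int32_t)fp[vCC];
        if (c == 0)
            return false;                        /* -> common_errDivideByZero */
        if (b == INT32_MIN && c == -1)
            fp[vAA] = (uint32_t)INT32_MIN;       /* what __aeabi_idiv yields here */
        else
            fp[vAA] = (uint32_t)(b / c);         /* bl __aeabi_idiv */
        return true;
    }
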
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT: /* 0x94 */
+/* File: armv5te/OP_REM_INT.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT: /* 0x95 */
+/* File: armv5te/OP_AND_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT: /* 0x96 */
+/* File: armv5te/OP_OR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/OP_XOR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/OP_SHL_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/OP_SHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/OP_USHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
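The three int-shift handlers above share one rule: only the low five bits of vCC are used as the shift distance, matching Java semantics. A minimal C sketch of that behaviour (function names are illustrative, not from the interpreter sources; signed shifts are shown for the usual two's-complement case):

    #include <stdint.h>

    static int32_t  dalvik_shl_int (int32_t vBB, int32_t vCC)  { return (int32_t)((uint32_t)vBB << (vCC & 31)); }
    static int32_t  dalvik_shr_int (int32_t vBB, int32_t vCC)  { return vBB >> (vCC & 31); }   /* arithmetic shift */
    static uint32_t dalvik_ushr_int(uint32_t vBB, int32_t vCC) { return vBB >> (vCC & 31); }   /* logical shift */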
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/OP_ADD_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/OP_SUB_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/OP_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_MUL_LONG_finish
+
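The decomposition the comment describes can be written directly in C; this is only an illustrative sketch (mul64_lo is not a name from the sources), confined to the low 64 bits exactly as the handler is:

    #include <stdint.h>

    /* a = W:X (hi:lo), b = Y:Z (hi:lo); returns the low 64 bits of a*b */
    static uint64_t mul64_lo(uint64_t a, uint64_t b)
    {
        uint32_t x = (uint32_t)a, w = (uint32_t)(a >> 32);
        uint32_t z = (uint32_t)b, y = (uint32_t)(b >> 32);

        uint64_t zx = (uint64_t)z * x;            /* umull: full 64-bit ZxX        */
        uint32_t hi = (uint32_t)(zx >> 32)        /* carry out of the low word     */
                    + z * w                       /* mul:  low 32 bits of ZxW      */
                    + y * x;                      /* mla:  low 32 bits of YxX      */
        return ((uint64_t)hi << 32) | (uint32_t)zx;   /* YxW only affects bits >= 64, so it is dropped */
    }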
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/OP_DIV_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/OP_REM_LONG.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
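In C terms the two handlers above differ only in which half of the __aeabi_ldivmod result they keep: div-long stores the quotient (r0/r1), rem-long the remainder (r2/r3). An illustrative sketch of the semantics with the explicit zero check that chkzero=1 enables (the EABI helper also tolerates LLONG_MIN / -1, which plain C division does not):

    #include <stdbool.h>
    #include <stdint.h>

    static bool dalvik_div_rem_long(int64_t vBB, int64_t vCC, bool want_rem, int64_t *out)
    {
        if (vCC == 0)
            return false;                      /* handler branches to common_errDivideByZero */
        if (vBB == INT64_MIN && vCC == -1) {   /* wraps on ARM; undefined in C */
            *out = want_rem ? 0 : INT64_MIN;
            return true;
        }
        *out = want_rem ? (vBB % vCC) : (vBB / vCC);
        return true;
    }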
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/OP_AND_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/OP_OR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/OP_XOR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/OP_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHL_LONG_finish
+
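The shl-long handler builds the 64-bit result from two 32-bit halves. A hedged C sketch of the same composition (shl64 is an illustrative name); the explicit n == 0 branch exists because an ARM register-specified shift of 32 yields 0, whereas shifting a 32-bit value by 32 is undefined in C:

    #include <stdint.h>

    static uint64_t shl64(uint32_t lo, uint32_t hi, uint32_t vCC)
    {
        uint32_t n = vCC & 63;                 /* Dalvik masks the shift distance to 6 bits */
        uint32_t rlo, rhi;
        if (n == 0) {
            rlo = lo; rhi = hi;
        } else if (n < 32) {
            rhi = (hi << n) | (lo >> (32 - n));
            rlo = lo << n;
        } else {                               /* the movpl path: n - 32 >= 0 */
            rhi = lo << (n - 32);
            rlo = 0;
        }
        return ((uint64_t)rhi << 32) | rlo;
    }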
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/OP_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/OP_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_USHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: arm-vfp/OP_ADD_FLOAT.S */
+/* File: arm-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ fadds s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: arm-vfp/OP_SUB_FLOAT.S */
+/* File: arm-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ fsubs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: arm-vfp/OP_MUL_FLOAT.S */
+/* File: arm-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ fmuls s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: arm-vfp/OP_DIV_FLOAT.S */
+/* File: arm-vfp/fbinop.S */
+ /*
+ * Generic 32-bit floating-point operation. Provide an "instr" line that
+ * specifies an instruction that performs "s2 = s0 op s1". Because we
+ * use the "softfp" ABI, this must be an instruction, not a function call.
+ *
+ * For: add-float, sub-float, mul-float, div-float
+ */
+ /* floatop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ flds s1, [r3] @ s1<- vCC
+ flds s0, [r2] @ s0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ fdivs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/OP_REM_FLOAT.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
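rem-float has no EABI helper, so the handler above simply calls libm's fmodf with vBB and vCC already in r0/r1 under the softfp ABI; a one-line C equivalent (function name illustrative):

    #include <math.h>

    static float dalvik_rem_float(float vBB, float vCC)
    {
        return fmodf(vBB, vCC);    /* division by zero yields NaN; no exception is thrown */
    }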
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: arm-vfp/OP_ADD_DOUBLE.S */
+/* File: arm-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+    faddd   d2, d0, d1                  @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: arm-vfp/OP_SUB_DOUBLE.S */
+/* File: arm-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+    fsubd   d2, d0, d1                  @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: arm-vfp/OP_MUL_DOUBLE.S */
+/* File: arm-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+    fmuld   d2, d0, d1                  @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: arm-vfp/OP_DIV_DOUBLE.S */
+/* File: arm-vfp/fbinopWide.S */
+ /*
+ * Generic 64-bit double-precision floating point binary operation.
+ * Provide an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * for: add-double, sub-double, mul-double, div-double
+ */
+ /* doubleop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vCC
+ VREG_INDEX_TO_ADDR(r2, r2) @ r2<- &vBB
+ fldd d1, [r3] @ d1<- vCC
+ fldd d0, [r2] @ d0<- vBB
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+    fdivd   d2, d0, d1                  @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vAA
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/OP_REM_DOUBLE.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/OP_ADD_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/OP_SUB_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/OP_MUL_INT_2ADDR.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/OP_DIV_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/OP_REM_INT_2ADDR.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
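div-int/2addr and rem-int/2addr are the two /2addr handlers above with chkzero set to 1; __aeabi_idivmod returns the quotient in r0 and the remainder in r1, and the chosen half is written back into vA in place. An illustrative C sketch (names are not from the sources):

    #include <stdbool.h>
    #include <stdint.h>

    static bool dalvik_div_rem_int(int32_t vA, int32_t vB, bool want_rem, int32_t *out)
    {
        if (vB == 0)
            return false;                      /* handler branches to common_errDivideByZero */
        if (vA == INT32_MIN && vB == -1) {     /* the ARM helpers wrap; undefined in C */
            *out = want_rem ? 0 : INT32_MIN;
            return true;
        }
        *out = want_rem ? (vA % vB) : (vA / vB);
        return true;
    }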
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/OP_AND_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/OP_OR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/OP_XOR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/OP_SHL_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/OP_SHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/OP_USHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/OP_ADD_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/OP_SUB_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/OP_MUL_LONG_2ADDR.S */
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See OP_MUL_LONG for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
+ /* mul-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/OP_DIV_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/OP_REM_LONG_2ADDR.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/OP_AND_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/OP_OR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/OP_XOR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/OP_SHL_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ b .LOP_SHL_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/OP_SHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ b .LOP_SHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/OP_USHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ b .LOP_USHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: arm-vfp/OP_ADD_FLOAT_2ADDR.S */
+/* File: arm-vfp/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fadds s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: arm-vfp/OP_SUB_FLOAT_2ADDR.S */
+/* File: arm-vfp/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fsubs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: arm-vfp/OP_MUL_FLOAT_2ADDR.S */
+/* File: arm-vfp/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fmuls s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: arm-vfp/OP_DIV_FLOAT_2ADDR.S */
+/* File: arm-vfp/fbinop2addr.S */
+ /*
+ * Generic 32-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "s2 = s0 op s1".
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ flds s1, [r3] @ s1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ flds s0, [r9] @ s0<- vA
+
+ fdivs s2, s0, s1 @ s2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fsts s2, [r9] @ vAA<- s2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: arm-vfp/OP_ADD_DOUBLE_2ADDR.S */
+/* File: arm-vfp/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ faddd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: arm-vfp/OP_SUB_DOUBLE_2ADDR.S */
+/* File: arm-vfp/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fsubd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: arm-vfp/OP_MUL_DOUBLE_2ADDR.S */
+/* File: arm-vfp/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fmuld d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: arm-vfp/OP_DIV_DOUBLE_2ADDR.S */
+/* File: arm-vfp/fbinopWide2addr.S */
+ /*
+ * Generic 64-bit floating point "/2addr" binary operation. Provide
+ * an "instr" line that specifies an instruction that performs
+ * "d2 = d0 op d1".
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ VREG_INDEX_TO_ADDR(r3, r3) @ r3<- &vB
+ and r9, r9, #15 @ r9<- A
+ fldd d1, [r3] @ d1<- vB
+ VREG_INDEX_TO_ADDR(r9, r9) @ r9<- &vA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ fldd d0, [r9] @ d0<- vA
+
+ fdivd d2, d0, d1 @ d2<- op
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ fstd d2, [r9] @ vAA<- d2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/OP_ADD_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
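+ /*
+ * Decode performed by the lit16 handlers in this group, as a C sketch
+ * (u2/s2/s4 follow the VM's fixed-width typedefs):
+ *
+ *   u2 inst = insns[0];                      // rINST
+ *   s4 lit  = (s2) insns[1];                 // ssssCCCC, sign-extended
+ *   u4 A = (inst >> 8) & 0xf, B = inst >> 12;
+ *   fp[A] = fp[B] + lit;                     // add-int/lit16
+ *
+ * rsub-int swaps the operands (lit - fp[B]); div/rem add the
+ * divide-by-zero check on the literal first.
+ */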
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/OP_RSUB_INT.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/OP_MUL_INT_LIT16.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/OP_DIV_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/OP_REM_INT_LIT16.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/OP_AND_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/OP_OR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/OP_XOR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/OP_ADD_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
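+ /*
+ * Decode performed by the lit8 handlers in this group (C sketch):
+ *
+ *   u2 word = insns[1];                      // CCBB
+ *   u4 BB   = word & 0xff;                   // source vreg index
+ *   s4 CC   = (s4)(s1)(word >> 8);           // sign-extended literal
+ *   fp[AA]  = fp[BB] + CC;                   // add-int/lit8
+ *
+ * The movs above sets the flags from CC, so the div/rem variants can
+ * branch to common_errDivideByZero without an explicit cmp.
+ */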
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/OP_RSUB_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/OP_MUL_INT_LIT8.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/OP_DIV_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/OP_REM_INT_LIT8.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/OP_AND_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/OP_OR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/OP_XOR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/OP_SHL_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/OP_SHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/OP_USHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/OP_IGET_VOLATILE.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_VOLATILE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/OP_IPUT_VOLATILE.S */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_VOLATILE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/OP_SGET_VOLATILE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_VOLATILE_resolve @ yes, do resolve
+.LOP_SGET_VOLATILE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/OP_SPUT_VOLATILE.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_VOLATILE_resolve @ yes, do resolve
+.LOP_SPUT_VOLATILE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ SMP_DMB
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/OP_IGET_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_OBJECT_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_OBJECT_VOLATILE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */
+/* File: armv5te/OP_IGET_WIDE.S */
+ /*
+ * Wide 64-bit instance field get.
+ */
+ /* iget-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_WIDE_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_WIDE_VOLATILE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */
+/* File: armv5te/OP_IPUT_WIDE.S */
+ /* iput-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_WIDE_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_WIDE_VOLATILE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */
+/* File: armv5te/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ /* sget-wide vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve
+.LOP_SGET_WIDE_VOLATILE_finish:
+ mov r9, rINST, lsr #8 @ r9<- AA
+ .if 1
+ add r0, r0, #offStaticField_value @ r0<- pointer to data
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+ .endif
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
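+ /*
+ * Why the ".if 1" path above: a 64-bit volatile read can't rely on a
+ * plain ldrd being atomic on the targeted cores, so the handler calls
+ * the quasi-atomic helper.  Roughly (C sketch):
+ *
+ *   s8 val = dvmQuasiAtomicRead64(&sfield->value.j);
+ *   fp[AA] = (u4) val;  fp[AA+1] = (u4) (val >> 32);
+ */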
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */
+/* File: armv5te/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ /* sput-wide vAA, field@BBBB */
+ ldr r0, [rSELF, #offThread_methodClassDex] @ r0<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r0, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r10, r1, lsl #2] @ r2<- resolved StaticField ptr
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r2, #0 @ is resolved entry null?
+ beq .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 1
+ add r2, r2, #offStaticField_value @ r2<- pointer to data
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/OP_BREAKPOINT.S */
+ /*
+ * Breakpoint handler.
+ *
+ * Restart this instruction with the original opcode. By
+ * the time we get here, the breakpoint will have already been
+ * handled.
+ */
+ mov r0, rPC
+ bl dvmGetOriginalOpcode @ (rPC)
+ FETCH(rINST, 0) @ reload OP_BREAKPOINT + rest of inst
+ ldr r1, [rSELF, #offThread_mainHandlerTable]
+ and rINST, #0xff00
+ orr rINST, rINST, r0
+ GOTO_OPCODE_BASE(r1, r0)
+
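+ /*
+ * In effect (C sketch): the breakpoint byte is replaced with the
+ * opcode it displaced, then dispatch restarts through the main
+ * handler table:
+ *
+ *   u1 op = dvmGetOriginalOpcode(rPC);       // saved when bp was set
+ *   rINST = (rINST & 0xff00) | op;           // rebuild the inst word
+ *   goto mainHandlerTable[op];
+ */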
+/* ------------------------------ */
+ .balign 64
+.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
+ /*
+ * Handle a throw-verification-error instruction. This throws an
+ * exception for an error discovered during verification. The
+ * exception is indicated by AA, with some detail provided by BBBB.
+ */
+ /* op AA, ref@BBBB */
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ FETCH(r2, 1) @ r2<- BBBB
+ EXPORT_PC() @ export the PC
+ mov r1, rINST, lsr #8 @ r1<- AA
+ bl dvmThrowVerificationError @ always throws
+ b common_exceptionThrown @ handle exception
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/OP_EXECUTE_INLINE.S */
+ /*
+ * Execute a "native inline" instruction.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in r0-r3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ *
+ * TUNING: could maintain two tables, pointer in Thread and
+ * swap if profiler/debugger active.
+ */
+ /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+ ldrh r2, [rSELF, #offThread_subMode]
+ FETCH(r10, 1) @ r10<- BBBB
+ EXPORT_PC() @ can throw
+ ands r2, #kSubModeDebugProfile @ Any going on?
+ bne .LOP_EXECUTE_INLINE_debugmode @ yes - take slow path
+.LOP_EXECUTE_INLINE_resume:
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #12 @ r0<- B
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ add sp, sp, #8 @ pop stack
+ cmp r0, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
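+ /*
+ * The call made above follows the InlineOp4Func shape described in
+ * the comment block (sketch of the typedef, as used here):
+ *
+ *   typedef bool (*InlineOp4Func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3,
+ *                                 JValue* pResult);
+ *
+ * Args arrive in r0-r3, &self->retval is pushed as pResult, and a
+ * false return means an exception is pending.
+ */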
+/* ------------------------------ */
+ .balign 64
+.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
+ /*
+ * Execute a "native inline" instruction, using "/range" semantics.
+ * Same idea as execute-inline, but we get the args differently.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in r0-r3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ */
+ /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
+ ldrh r2, [rSELF, #offThread_subMode]
+ FETCH(r10, 1) @ r10<- BBBB
+ EXPORT_PC() @ can throw
+ ands r2, #kSubModeDebugProfile @ Any going on?
+ bne .LOP_EXECUTE_INLINE_RANGE_debugmode @ yes - take slow path
+.LOP_EXECUTE_INLINE_RANGE_resume:
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #8 @ r0<- AA
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ add sp, sp, #8 @ pop stack
+ cmp r0, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
+ /*
+ * Invoke Object.<init> on an object. In practice we know that
+ * Object's nullary constructor doesn't do anything, so we just
+ * skip it unless a debugger is active.
+ */
+ FETCH(r1, 2) @ r1<- CCCC
+ GET_VREG(r0, r1) @ r0<- "this" ptr
+ cmp r0, #0 @ check for NULL
+ beq common_errNullObject @ export PC and throw NPE
+ ldr r1, [r0, #offObject_clazz] @ r1<- obj->clazz
+ ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+ tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
+ bne .LOP_INVOKE_OBJECT_INIT_RANGE_setFinal @ yes, go
+.LOP_INVOKE_OBJECT_INIT_RANGE_finish:
+ ldrh r1, [rSELF, #offThread_subMode]
+ ands r1, #kSubModeDebuggerActive @ debugger active?
+ bne .LOP_INVOKE_OBJECT_INIT_RANGE_debugger @ Yes - skip optimization
+ FETCH_ADVANCE_INST(2+1) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB_ST
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
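+ /*
+ * "Quick" field ops carry a resolved byte offset in place of a field
+ * reference, so the whole fast path is (C sketch):
+ *
+ *   Object* obj = (Object*) fp[B];
+ *   if (obj == NULL) goto throwNullPointer;
+ *   fp[A] = *(u4*) ((u1*) obj + CCCC);       // CCCC = field offset
+ */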
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/OP_IGET_WIDE_QUICK.S */
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(ip, 1) @ ip<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
+ and r2, r2, #15
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/OP_IPUT_QUICK.S */
+ /* For: iput-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/OP_IPUT_WIDE_QUICK.S */
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A(+)
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r2, r1) @ r2<- fp[B], the object pointer
+ add r3, rFP, r0, lsl #2 @ r3<- &fp[A]
+ cmp r2, #0 @ check object for null
+ ldmia r3, {r0-r1} @ r0/r1<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH(r3, 1) @ r3<- field byte offset
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
+ /* For: iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ cmp r0, #0
+ strneb r2, [r2, r3, lsr #GC_CARD_SHIFT] @ mark card based on obj head
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
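+ /*
+ * The strneb above is the GC card-marking write barrier: after a
+ * reference store, the card covering the object header is marked so
+ * the concurrent GC can find the updated reference.  Roughly (C sketch):
+ *
+ *   if (ref != NULL)
+ *       cardTable[(u4) obj >> GC_CARD_SHIFT] = dirtyByte;  // non-zero marker
+ */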
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r9, r3) @ r9<- vC ("this" ptr)
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r9, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodNoRange @ (r0=method, r9="this")
+
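+ /*
+ * Equivalent lookup in C (sketch): the vtable index was fixed when the
+ * method was quickened, so no resolution happens here:
+ *
+ *   Object* thisPtr = (Object*) fp[C];
+ *   if (thisPtr == NULL) goto throwNullPointer;
+ *   Method* meth = thisPtr->clazz->vtable[BBBB];
+ *   // common_invokeMethodNoRange takes over with r0=meth, r9=thisPtr
+ */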
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r9, r3) @ r9<- vC ("this" ptr)
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r9, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodRange @ (r0=method, r9="this")
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r9, r10) @ r9<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r9, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodNoRange @ (r0=method, r9="this")
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r9, r10) @ r9<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r9, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodRange @ (r0=method, r9="this")
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/OP_IPUT_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_IPUT_OBJECT.S */
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_OBJECT_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_OBJECT_VOLATILE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/OP_SGET_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_OBJECT_VOLATILE_resolve @ yes, do resolve
+.LOP_SGET_OBJECT_VOLATILE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/OP_SPUT_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_SPUT_OBJECT.S */
+ /*
+ * 32-bit SPUT handler for objects
+ *
+ * for: sput-object, sput-object-volatile
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_OBJECT_VOLATILE_resolve @ yes, do resolve
+.LOP_SPUT_OBJECT_VOLATILE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ ldr r9, [r0, #offField_clazz] @ r9<- field->clazz
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ b .LOP_SPUT_OBJECT_VOLATILE_end
+
+
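The volatile static-field handlers above differ from their non-volatile counterparts only in the barriers: sget-object-volatile issues SMP_DMB after the load (a load-acquire) and sput-object-volatile issues SMP_DMB_ST before the store (a store-release), followed by the usual card mark. A hedged C11 sketch of that ordering contract, with the field slot standing in for StaticField.value:

    #include <stdatomic.h>
    #include <stdint.h>

    /* Stand-in for one static object field (StaticField.value in the VM). */
    static _Atomic(uintptr_t) static_obj_slot;

    /* sget-*-volatile: a plain load followed by a barrier behaves as a
       load-acquire. */
    static uintptr_t sget_volatile(void) {
        return atomic_load_explicit(&static_obj_slot, memory_order_acquire);
    }

    /* sput-*-volatile: a barrier before the store behaves as a store-release;
       the interpreter then dirties the card for the owning class so the
       concurrent collector revisits it. */
    static void sput_volatile(uintptr_t ref) {
        atomic_store_explicit(&static_obj_slot, ref, memory_order_release);
    }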
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FF: /* 0xff */
+/* File: armv5te/OP_UNUSED_FF.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+ .balign 64
+ .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
+ .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global dvmAsmSisterStart
+ .type dvmAsmSisterStart, %function
+ .text
+ .balign 4
+dvmAsmSisterStart:
+
+/* continuation for OP_CONST_STRING */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_resolve:
+ EXPORT_PC()
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CONST_STRING_JUMBO */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBBBBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_JUMBO_resolve:
+ EXPORT_PC()
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CONST_CLASS */
+
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * r1: BBBB (Class ref)
+ * r9: target register
+ */
+.LOP_CONST_CLASS_resolve:
+ EXPORT_PC()
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- Class reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CHECK_CAST */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds desired class resolved from BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_fullcheck:
+ mov r10, r1 @ avoid ClassObject getting clobbered
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ cmp r0, #0 @ failed?
+ bne .LOP_CHECK_CAST_okay @ no, success
+
+ @ A cast has failed. We need to throw a ClassCastException.
+ EXPORT_PC() @ about to throw
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz (actual class)
+ mov r1, r10 @ r1<- desired class
+ bl dvmThrowClassCastException
+ b common_exceptionThrown
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r2 holds BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r1, r2 @ r1<- BBBB
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_CHECK_CAST_resolved @ pick up where we left off
+
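This check-cast continuation only runs when the main handler's trivial obj->clazz == desired test fails; dvmInstanceofNonTrivial then walks the type hierarchy, and a ClassCastException is thrown only if that walk also fails (null references always pass). A compact C sketch of the overall flow, with the runtime entry points merely declared and their signatures approximated:

    #include <stdbool.h>
    #include <stddef.h>

    struct ClassObject;
    struct Object { struct ClassObject *clazz; };

    /* Runtime entry points named in the handler, declared only (signatures
       approximated for this sketch). */
    bool dvmInstanceofNonTrivial(const struct ClassObject *instClazz,
                                 const struct ClassObject *targetClazz);
    void dvmThrowClassCastException(const struct ClassObject *actual,
                                    const struct ClassObject *desired);

    static void check_cast(struct Object *obj, struct ClassObject *desired) {
        if (obj == NULL)
            return;                     /* null always passes check-cast */
        if (obj->clazz == desired)
            return;                     /* trivial test, taken in the fast path */
        if (!dvmInstanceofNonTrivial(obj->clazz, desired))
            dvmThrowClassCastException(obj->clazz, desired);   /* cast failed */
    }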
+/* continuation for OP_INSTANCE_OF */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds class resolved from BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_fullcheck:
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ @ fall through to OP_INSTANCE_OF_store
+
+ /*
+ * r0 holds boolean result
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_store:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Trivial test succeeded, save and bail.
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_trivial:
+ mov r0, #1 @ indicate success
+ @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r3 holds BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ mov r1, r3 @ r1<- BBBB
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ mov r3, rINST, lsr #12 @ r3<- B
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_INSTANCE_OF_resolved @ pick up where we left off
+
+/* continuation for OP_NEW_INSTANCE */
+
+ .balign 32 @ minimize cache lines
+.LOP_NEW_INSTANCE_finish: @ r0=new object
+ mov r3, rINST, lsr #8 @ r3<- AA
+ cmp r0, #0 @ failed?
+#if defined(WITH_JIT)
+ /*
+ * The JIT needs the class to be fully resolved before it can
+ * include this instruction in a trace.
+ */
+ ldrh r1, [rSELF, #offThread_subMode]
+ beq common_exceptionThrown @ yes, handle the exception
+ ands r1, #kSubModeJitTraceBuild @ under construction?
+ bne .LOP_NEW_INSTANCE_jitCheck
+#else
+ beq common_exceptionThrown @ yes, handle the exception
+#endif
+.LOP_NEW_INSTANCE_end:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we need to stop the trace building early.
+ * r0: new object
+ * r3: vAA
+ */
+.LOP_NEW_INSTANCE_jitCheck:
+ ldr r1, [r10] @ reload resolved class
+ cmp r1, #0 @ okay?
+ bne .LOP_NEW_INSTANCE_end @ yes, finish
+ mov r9, r0 @ preserve new object
+ mov r10, r3 @ preserve vAA
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self, pc)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r9, r10) @ vAA<- new object
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+ /*
+ * Class initialization required.
+ *
+ * r0 holds class object
+ */
+.LOP_NEW_INSTANCE_needinit:
+ mov r9, r0 @ save r0
+ bl dvmInitClass @ initialize class
+ cmp r0, #0 @ check boolean result
+ mov r0, r9 @ restore r0
+ bne .LOP_NEW_INSTANCE_initialized @ success, continue
+ b common_exceptionThrown @ failed, deal with init exception
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r1 holds BBBB
+ */
+.LOP_NEW_INSTANCE_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ bne .LOP_NEW_INSTANCE_resolved @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+/* continuation for OP_NEW_ARRAY */
+
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ *
+ * r1 holds array length
+ * r2 holds class ref CCCC
+ */
+.LOP_NEW_ARRAY_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r9, r1 @ r9<- length (save)
+ mov r1, r2 @ r1<- CCCC
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ mov r1, r9 @ r1<- length (restore)
+ beq common_exceptionThrown @ yes, handle exception
+ @ fall through to OP_NEW_ARRAY_finish
+
+ /*
+ * Finish allocation.
+ *
+ * r0 holds class
+ * r1 holds array length
+ */
+.LOP_NEW_ARRAY_finish:
+ mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table
+ bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags)
+ cmp r0, #0 @ failed?
+ mov r2, rINST, lsr #8 @ r2<- A+
+ beq common_exceptionThrown @ yes, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb rINST, [r3, #1] @ rINST<- descriptor[1]
+ .if 0
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp rINST, #'I' @ array of ints?
+ cmpne rINST, #'L' @ array of objects?
+ cmpne rINST, #'[' @ array of arrays?
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rSELF, #offThread_retval] @ retval.l <- new array
+ str rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 0
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ ldr r0, [rSELF, #offThread_retval] @ r0<- object
+ ldr r1, [rSELF, #offThread_retval+4] @ r1<- type
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ cmp r1, #'I' @ Is int array?
+ strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+ GOTO_OPCODE(ip) @ execute it
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+ ldr r0, .L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY
+ bl dvmThrowInternalError
+ b common_exceptionThrown
+
+ /*
+ * Ideally we'd only define this once, but depending on layout we can
+ * exceed the range of the load above.
+ */
+
+.L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY:
+ .word .LstrFilledNewArrayNotImpl
+
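The continuation above handles two argument encodings: the non-range form pulls up to five values out of the instruction's nibbles (vC..vF plus vA as the fifth), while the range form copies a contiguous run of registers; only 32-bit element types ('I', 'L', '[') ever reach this code. A C sketch of the copy logic under those assumptions, with illustrative names:

    #include <stdint.h>
    #include <string.h>

    /* Non-range filled-new-array: the count comes from B, the first four
       argument registers from the nibbles F|E|D|C, and the fifth (if any)
       from the A nibble. */
    static void fill_from_nibbles(uint32_t *contents, const uint32_t *fp,
                                  unsigned count, unsigned regA,
                                  unsigned nibblesFEDC) {
        if (count == 5) {
            contents[4] = fp[regA];                /* fifth value comes from vA */
            count = 4;
        }
        for (unsigned i = 0; i < count; i++) {
            contents[i] = fp[nibblesFEDC & 0xf];   /* vC, vD, vE, vF in turn */
            nibblesFEDC >>= 4;
        }
    }

    /* Range form: arguments are vCCCC..v(CCCC+AA-1), so a straight copy works. */
    static void fill_from_range(uint32_t *contents, const uint32_t *fp,
                                unsigned firstReg, unsigned count) {
        memcpy(contents, fp + firstReg, count * sizeof(uint32_t));
    }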
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb rINST, [r3, #1] @ rINST<- descriptor[1]
+ .if 1
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp rINST, #'I' @ array of ints?
+ cmpne rINST, #'L' @ array of objects?
+ cmpne rINST, #'[' @ array of arrays?
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rSELF, #offThread_retval] @ retval.l <- new array
+ str rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 1
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ ldr r0, [rSELF, #offThread_retval] @ r0<- object
+ ldr r1, [rSELF, #offThread_retval+4] @ r1<- type
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ cmp r1, #'I' @ Is int array?
+ strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+ GOTO_OPCODE(ip) @ execute it
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+ ldr r0, .L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY_RANGE
+ bl dvmThrowInternalError
+ b common_exceptionThrown
+
+ /*
+ * Ideally we'd only define this once, but depending on layout we can
+ * exceed the range of the load above.
+ */
+
+.L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY_RANGE:
+ .word .LstrFilledNewArrayNotImpl
+
+/* continuation for OP_CMPL_FLOAT */
+.LOP_CMPL_FLOAT_finish:
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CMPG_FLOAT */
+.LOP_CMPG_FLOAT_finish:
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CMPL_DOUBLE */
+.LOP_CMPL_DOUBLE_finish:
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CMPG_DOUBLE */
+.LOP_CMPG_DOUBLE_finish:
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CMP_LONG */
+
+.LOP_CMP_LONG_less:
+ mvn r1, #0 @ r1<- -1
+ @ Ideally we'd make the next mov conditional so we could avoid the branch, but
+ @ there's no clean way to do it; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LOP_CMP_LONG_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.LOP_CMP_LONG_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_AGET_WIDE */
+
+.LOP_AGET_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_APUT_WIDE */
+
+.LOP_APUT_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strd r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_APUT_OBJECT */
+ /*
+ * On entry:
+ * rINST = vBB (arrayObj)
+ * r9 = vAA (obj)
+ * r10 = offset into array (vBB + vCC * width)
+ */
+.LOP_APUT_OBJECT_finish:
+ cmp r9, #0 @ storing null reference?
+ beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ ldr r1, [rINST, #offObject_clazz] @ r1<- arrayObj->clazz
+ bl dvmCanPutArrayElement @ test object type vs. array type
+ cmp r0, #0 @ okay?
+ beq .LOP_APUT_OBJECT_throw @ no
+ mov r1, rINST @ r1<- arrayObj
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [rSELF, #offThread_cardTable] @ get biased CT base
+ add r10, #offArrayObject_contents @ r10<- pointer to slot
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r9, [r10] @ vBB[vCC]<- vAA
+ strb r2, [r2, r1, lsr #GC_CARD_SHIFT] @ mark card using object head
+ GOTO_OPCODE(ip) @ jump to next instruction
+.LOP_APUT_OBJECT_skip_check:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+.LOP_APUT_OBJECT_throw:
+ @ The types don't match. We need to throw an ArrayStoreException.
+ ldr r0, [r9, #offObject_clazz]
+ ldr r1, [rINST, #offObject_clazz]
+ EXPORT_PC()
+ bl dvmThrowArrayStoreExceptionIncompatibleElement
+ b common_exceptionThrown
+
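The aput-object continuation combines the store check with the GC write barrier: a null store skips both the type check and the card mark, while a non-null store must pass dvmCanPutArrayElement and then dirties the card covering the array object. A hedged C sketch; the card constants and struct layout are illustrative, and the runtime check is only declared:

    #include <stdbool.h>
    #include <stdint.h>

    enum { GC_CARD_SHIFT = 7, GC_CARD_DIRTY = 0x70 };   /* illustrative values */

    struct ClassObject;
    struct Object      { struct ClassObject *clazz; };
    struct ArrayObject { struct ClassObject *clazz; uint32_t length;
                         struct Object *contents[]; };

    /* Runtime check named in the handler, declared only. */
    bool dvmCanPutArrayElement(const struct ClassObject *objClazz,
                               const struct ClassObject *arrayClazz);

    /* Returns false when an ArrayStoreException should be thrown. */
    static bool aput_object(struct ArrayObject *array, unsigned index,
                            struct Object *value, uint8_t *cardTable) {
        if (value != NULL && !dvmCanPutArrayElement(value->clazz, array->clazz))
            return false;                              /* incompatible element */
        array->contents[index] = value;
        if (value != NULL)                             /* null stores need no barrier */
            cardTable[(uintptr_t)array >> GC_CARD_SHIFT] = GC_CARD_DIRTY;
        return true;
    }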
+/* continuation for OP_IGET */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_WIDE_finish:
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ .if 0
+ add r0, r9, r3 @ r0<- address of field
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok)
+ .endif
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_OBJECT_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BOOLEAN_finish:
+ @bl common_squeak1
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BYTE_finish:
+ @bl common_squeak2
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_CHAR_finish:
+ @bl common_squeak3
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_SHORT_finish:
+ @bl common_squeak4
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_WIDE_finish:
+ mov r2, rINST, lsr #8 @ r2<- A+
+ cmp r9, #0 @ check object for null
+ and r2, r2, #15 @ r2<- A
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r2, {r0-r1} @ r0/r1<- fp[A]
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 0
+ add r2, r9, r3 @ r2<- target address
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_OBJECT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (32 bits)<- r0
+ @ no-op
+ cmp r0, #0 @ stored a null reference?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BOOLEAN_finish:
+ @bl common_squeak1
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BYTE_finish:
+ @bl common_squeak2
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_CHAR_finish:
+ @bl common_squeak3
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_SHORT_finish:
+ @bl common_squeak4
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SGET */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_finish
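Every sget/sput continuation in this run follows the same lazy-resolution pattern: the fast path indexes dvmDex->pResFields by the BBBB field ref, and only a cache miss calls dvmResolveStaticField (throwing if that returns null). A minimal C sketch of the cache-then-resolve lookup; the cache struct is a stand-in and the resolver's signature is approximated:

    #include <stddef.h>

    struct StaticField;

    /* Resolver named in the handlers, declared with an approximated signature. */
    struct StaticField *dvmResolveStaticField(const void *referrerClazz,
                                              unsigned fieldRef);

    /* Per-DEX cache of resolved static fields, indexed by the BBBB field ref. */
    struct ResFieldCache { struct StaticField **pResFields; };

    static struct StaticField *lookup_static_field(struct ResFieldCache *cache,
                                                   const void *referrerClazz,
                                                   unsigned fieldRef) {
        struct StaticField *field = cache->pResFields[fieldRef];
        if (field == NULL)                     /* slow path: .L..._resolve */
            field = dvmResolveStaticField(referrerClazz, fieldRef);
        return field;                          /* NULL means an exception is pending */
    }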
+
+/* continuation for OP_SGET_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r0.
+ */
+.LOP_SGET_WIDE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_WIDE_finish @ resume
+
+/* continuation for OP_SGET_OBJECT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_OBJECT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_OBJECT_finish
+
+/* continuation for OP_SGET_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_BOOLEAN_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_BOOLEAN_finish
+
+/* continuation for OP_SGET_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_BYTE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_BYTE_finish
+
+/* continuation for OP_SGET_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_CHAR_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_CHAR_finish
+
+/* continuation for OP_SGET_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_SHORT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_SHORT_finish
+
+/* continuation for OP_SPUT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_finish @ resume
+
+/* continuation for OP_SPUT_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r9: &fp[AA]
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r2.
+ */
+.LOP_SPUT_WIDE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ mov r2, r0 @ copy to r2
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_WIDE_finish @ resume
+
+/* continuation for OP_SPUT_OBJECT */
+
+
+.LOP_SPUT_OBJECT_end:
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ cmp r1, #0 @ stored a null object?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /* Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_OBJECT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_OBJECT_finish @ resume
+
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_BOOLEAN_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_BOOLEAN_finish @ resume
+
+/* continuation for OP_SPUT_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_BYTE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_BYTE_finish @ resume
+
+/* continuation for OP_SPUT_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_CHAR_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_CHAR_finish @ resume
+
+/* continuation for OP_SPUT_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_SHORT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_SHORT_finish @ resume
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_continue:
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r9, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ (r0=method, r9="this")
+
+/* continuation for OP_INVOKE_SUPER */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = method->clazz
+ */
+.LOP_INVOKE_SUPER_continue:
+ ldr r1, [r10, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ continue on
+
+.LOP_INVOKE_SUPER_resolve:
+ mov r0, r10 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
+/* continuation for OP_INVOKE_DIRECT */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_DIRECT_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+/* continuation for OP_INVOKE_STATIC */
+
+
+.LOP_INVOKE_STATIC_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we're actively building a trace. If so,
+ * we need to keep this instruction out of it.
+ * r10: &resolved_methodToCall
+ */
+ ldrh r2, [rSELF, #offThread_subMode]
+ beq common_exceptionThrown @ null, handle exception
+ ands r2, #kSubModeJitTraceBuild @ trace under construction?
+ beq common_invokeMethodNoRange @ no (r0=method, r9="this")
+ ldr r1, [r10] @ reload resolved method
+ cmp r1, #0 @ finished resolving?
+ bne common_invokeMethodNoRange @ yes (r0=method, r9="this")
+ mov r10, r0 @ preserve method
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self, pc)
+ mov r0, r10
+ b common_invokeMethodNoRange @ whew, finally!
+#else
+ bne common_invokeMethodNoRange @ (r0=method, r9="this")
+ b common_exceptionThrown @ yes, handle exception
+#endif
+
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r9, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ (r0=method, r9="this")
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = method->clazz
+ */
+.LOP_INVOKE_SUPER_RANGE_continue:
+ ldr r1, [r10, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ continue on
+
+.LOP_INVOKE_SUPER_RANGE_resolve:
+ mov r0, r10 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
+/* continuation for OP_INVOKE_DIRECT_RANGE */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_RANGE_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+/* continuation for OP_INVOKE_STATIC_RANGE */
+
+
+.LOP_INVOKE_STATIC_RANGE_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we're actively building a trace. If so,
+ * we need to keep this instruction out of it.
+ * r10: &resolved_methodToCall
+ */
+ ldrh r2, [rSELF, #offThread_subMode]
+ beq common_exceptionThrown @ null, handle exception
+ ands r2, #kSubModeJitTraceBuild @ trace under construction?
+ beq common_invokeMethodRange @ no (r0=method, r9="this")
+ ldr r1, [r10] @ reload resolved method
+ cmp r1, #0 @ finished resolving?
+ bne common_invokeMethodRange @ yes (r0=method, r9="this")
+ mov r10, r0 @ preserve method
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self, pc)
+ mov r0, r10
+ b common_invokeMethodRange @ whew, finally!
+#else
+ bne common_invokeMethodRange @ (r0=method, r9="this")
+ b common_exceptionThrown @ yes, handle exception
+#endif
+
+/* continuation for OP_FLOAT_TO_LONG */
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
+
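The clipping described above is the Java conversion rule: NaN becomes zero and out-of-range values saturate at Long.MIN_VALUE/MAX_VALUE, which the bare EABI helper does not guarantee. A small C sketch of the intended result; the double-to-long path that follows applies the same rule:

    #include <math.h>
    #include <stdint.h>

    /* Java-style float-to-long: saturate at the long range, map NaN to zero. */
    static int64_t java_f2l(float in) {
        if (isnan(in))
            return 0;                   /* NaN compares unequal to itself */
        if (in >= 0x1p63f)
            return INT64_MAX;           /* arg >= (float)Long.MAX_VALUE */
        if (in <= -0x1p63f)
            return INT64_MIN;           /* arg <= (float)Long.MIN_VALUE */
        return (int64_t)in;             /* in range: ordinary conversion */
    }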
+/* continuation for OP_DOUBLE_TO_LONG */
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ mov r3, #0x43000000 @ maxlong, as a double (high word)
+ add r3, #0x00e00000 @ 0x43e00000
+ mov r2, #0 @ maxlong, as a double (low word)
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save a copy of r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r3, #0xc3000000 @ minlong, as a double (high word)
+ add r3, #0x00e00000 @ 0xc3e00000
+ mov r2, #0 @ minlong, as a double (low word)
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+/* continuation for OP_MUL_LONG */
+
+.LOP_MUL_LONG_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHL_LONG */
+
+.LOP_SHL_LONG_finish:
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHR_LONG */
+
+.LOP_SHR_LONG_finish:
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_USHR_LONG */
+
+.LOP_USHR_LONG_finish:
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHL_LONG_2ADDR */
+
+.LOP_SHL_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHR_LONG_2ADDR */
+
+.LOP_SHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_USHR_LONG_2ADDR */
+
+.LOP_USHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_VOLATILE_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_VOLATILE_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ SMP_DMB
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SGET_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_VOLATILE_finish
+
+/* continuation for OP_SPUT_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_VOLATILE_finish @ resume
+
+/* continuation for OP_IGET_OBJECT_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_OBJECT_VOLATILE_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_WIDE_VOLATILE_finish:
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ .if 1
+ add r0, r9, r3 @ r0<- address of field
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok)
+ .endif
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_WIDE_VOLATILE_finish:
+ mov r2, rINST, lsr #8 @ r2<- A+
+ cmp r9, #0 @ check object for null
+ and r2, r2, #15 @ r2<- A
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r2, {r0-r1} @ r0/r1<- fp[A]
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 1
+ add r2, r9, r3 @ r2<- target address
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
+/* continuation for OP_SGET_WIDE_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r0.
+ */
+.LOP_SGET_WIDE_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_WIDE_VOLATILE_finish @ resume
+
+/* continuation for OP_SPUT_WIDE_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r9: &fp[AA]
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r2.
+ */
+.LOP_SPUT_WIDE_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ mov r2, r0 @ copy to r2
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_WIDE_VOLATILE_finish @ resume
+
+/* continuation for OP_EXECUTE_INLINE */
+
+ /*
+ * Extract args, call function.
+ * r0 = #of args (0-4)
+ * r10 = call index
+ * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
+ *
+ * Other ideas:
+ * - Use a jump table from the main piece to jump directly into the
+ * AND/LDR pairs. Costs a data load, saves a branch.
+ * - Have five separate pieces that do the loading, so we can work the
+ * interleave a little better. Increases code size.
+ */
+.LOP_EXECUTE_INLINE_continue:
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(rINST, 2) @ rINST<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, rINST, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, rINST, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, rINST, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, rINST, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
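+ /*
+  * For reference, a rough C equivalent of the computed-goto argument
+  * loading above (hypothetical vreg() accessor; the handler signature is
+  * assumed to match InlineOperation in InlineNative.h). The fall-through
+  * mirrors the skipped AND/LDR pairs:
+  *
+  *     switch (argCount) {
+  *     case 4: arg3 = vreg(F);    // fall through
+  *     case 3: arg2 = vreg(E);
+  *     case 2: arg1 = vreg(D);
+  *     case 1: arg0 = vreg(C);
+  *     case 0: break;
+  *     }
+  *     (*gDvmInlineOpsTable[opIndex].func)(arg0, arg1, arg2, arg3, pResult);
+  */
+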
+ /*
+ * We're debugging or profiling.
+ * r10: opIndex
+ */
+.LOP_EXECUTE_INLINE_debugmode:
+ mov r0, r10
+ bl dvmResolveInlineNative
+ cmp r0, #0 @ did it resolve?
+ beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
+ mov r1, rSELF
+ bl dvmFastMethodTraceEnter @ (method, self)
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #12 @ r0<- B
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
+ add sp, sp, #8 @ pop stack
+ mov r0, r9 @ r0<- method
+ mov r1, rSELF
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+
+.LOP_EXECUTE_INLINE_table:
+ .word gDvmInlineOpsTable
+
+/* continuation for OP_EXECUTE_INLINE_RANGE */
+
+ /*
+ * Extract args, call function.
+ * r0 = #of args (0-4)
+ * r10 = call index
+ * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
+ */
+.LOP_EXECUTE_INLINE_RANGE_continue:
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- CCCC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: add ip, r9, #3 @ base+3
+ GET_VREG(r3, ip) @ r3<- vBase[3]
+3: add ip, r9, #2 @ base+2
+ GET_VREG(r2, ip) @ r2<- vBase[2]
+2: add ip, r9, #1 @ base+1
+ GET_VREG(r1, ip) @ r1<- vBase[1]
+1: add ip, r9, #0 @ (nop)
+ GET_VREG(r0, ip) @ r0<- vBase[0]
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
+ ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+
+ /*
+ * We're debugging or profiling.
+ * r10: opIndex
+ */
+.LOP_EXECUTE_INLINE_RANGE_debugmode:
+ mov r0, r10
+ bl dvmResolveInlineNative
+ cmp r0, #0 @ did it resolve?
+ beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
+ mov r1, rSELF
+ bl dvmFastMethodTraceEnter @ (method, self)
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #8 @ r0<- B
+ mov rINST, r9 @ rINST<- method
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
+ add sp, sp, #8 @ pop stack
+ mov r0, rINST @ r0<- method
+ mov r1, rSELF
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+
+.LOP_EXECUTE_INLINE_RANGE_table:
+ .word gDvmInlineOpsTable
+
+
+/* continuation for OP_INVOKE_OBJECT_INIT_RANGE */
+
+.LOP_INVOKE_OBJECT_INIT_RANGE_setFinal:
+ EXPORT_PC() @ can throw
+ bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
+ ldr r0, [rSELF, #offThread_exception] @ r0<- self->exception
+ cmp r0, #0 @ exception pending?
+ bne common_exceptionThrown @ yes, handle it
+ b .LOP_INVOKE_OBJECT_INIT_RANGE_finish
+
+ /*
+ * A debugger is attached, so we need to go ahead and do
+ * this. For simplicity, we'll just jump directly to the
+ * corresponding handler. Note that we can't use
+ * rIBASE here because it may be in single-step mode.
+ * Load the primary table base directly.
+ */
+.LOP_INVOKE_OBJECT_INIT_RANGE_debugger:
+ ldr r1, [rSELF, #offThread_mainHandlerTable]
+ mov ip, #OP_INVOKE_DIRECT_RANGE
+ GOTO_OPCODE_BASE(r1,ip) @ execute it
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_OBJECT_VOLATILE_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ str r0, [r9, r3] @ obj.field (32 bits)<- r0
+ SMP_DMB
+ cmp r0, #0 @ stored a null reference?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SGET_OBJECT_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_OBJECT_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_OBJECT_VOLATILE_finish
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE */
+
+
+.LOP_SPUT_OBJECT_VOLATILE_end:
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ SMP_DMB
+ cmp r1, #0 @ stored a null object?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /* Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_OBJECT_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_OBJECT_VOLATILE_finish @ resume
+
+
+ .size dvmAsmSisterStart, .-dvmAsmSisterStart
+ .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+
+ .global dvmAsmAltInstructionStart
+ .type dvmAsmAltInstructionStart, %function
+ .text
+
+dvmAsmAltInstructionStart = .L_ALT_OP_NOP
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (0 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
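+/*
+ * The stub above, repeated below for every opcode, boils down to this
+ * C-level sketch (names as used in this file; "opcode" stands for the
+ * stub's instruction number):
+ *
+ *     rIBASE = self->curHandlerTable;                 // always refresh
+ *     if (self->breakFlags == 0)
+ *         goto *(dvmAsmInstructionStart + opcode*64); // nothing pending
+ *     EXPORT_PC();
+ *     dvmCheckBefore(pc, fp, self);   // tail call; resumes at the real
+ *                                     // handler via lr when it returns
+ */
+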
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (1 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (2 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (3 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (4 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (5 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (6 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (7 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (8 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (9 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (10 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (11 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (12 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (13 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (14 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (15 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (16 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (17 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (18 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (19 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (20 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (21 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (22 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (23 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (24 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (25 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (26 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (27 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (28 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (29 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (30 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (31 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (32 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (33 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (34 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (35 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (36 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (37 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (38 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (39 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (40 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (41 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (42 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (43 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (44 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (45 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (46 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (47 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (48 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (49 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (50 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (51 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (52 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (53 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (54 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (55 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (56 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (57 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (58 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (59 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (60 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (61 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (62 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (63 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (64 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (65 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (66 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (67 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (68 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (69 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (70 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (71 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (72 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (73 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (74 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (75 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (76 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (77 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (78 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (79 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (80 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (81 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (82 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (83 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (84 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (85 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (86 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (87 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (88 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (89 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (90 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (91 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (92 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (93 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (94 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (95 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (96 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (97 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (98 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (99 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (100 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (101 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (102 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (103 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (104 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (105 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (106 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (107 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (108 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (109 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (110 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (111 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (112 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (113 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (114 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (115 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (116 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (117 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (118 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (119 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (120 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (121 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (122 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (123 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (124 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (125 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (126 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (127 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (128 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (129 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (130 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (131 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (132 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (133 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (134 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (135 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (136 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (137 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (138 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (139 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (140 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (141 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (142 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (143 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (144 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (145 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (146 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (147 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (148 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (149 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (150 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (151 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (152 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (153 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (154 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (155 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (156 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (157 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (158 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (159 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (160 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (161 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (162 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (163 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (164 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (165 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (166 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (167 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (168 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (169 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (170 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (171 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (172 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (173 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (174 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (175 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (176 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (177 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (178 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (179 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (180 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (181 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (182 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (183 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (184 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (185 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (186 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (187 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (188 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (189 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (190 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (191 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (192 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (193 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (194 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (195 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (196 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (197 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (198 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (199 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (200 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (201 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (202 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (203 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (204 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (205 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (206 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (207 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (208 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (209 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (210 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (211 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (212 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (213 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (214 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (215 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (216 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (217 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (218 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (219 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (220 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (221 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (222 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (223 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (224 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (225 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (226 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (227 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (228 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (229 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (230 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (231 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (232 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (233 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (234 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (235 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (236 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (237 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (238 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (239 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (240 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (241 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (242 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (243 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (244 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (245 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (246 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (247 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (248 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (249 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (250 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (251 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (252 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (253 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (254 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_FF: /* 0xff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (255 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+ .balign 64
+ .size dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+ .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
+/* File: armv5te/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+#if defined(WITH_JIT)
+
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * "longjmp" to a translation after single-stepping. Before returning
+ * to translation, must save state for self-verification.
+ */
+ .global dvmJitResumeTranslation @ (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+ mov rSELF, r0 @ restore self
+ mov rPC, r1 @ restore Dalvik pc
+ mov rFP, r2 @ restore Dalvik fp
+ ldr r10, [rSELF,#offThread_jitResumeNPC] @ resume address
+ mov r2, #0
+ str r2, [rSELF,#offThread_jitResumeNPC] @ reset resume address
+ ldr sp, [rSELF,#offThread_jitResumeNSP] @ cut back native stack
+ b jitSVShadowRunStart @ resume as if cache hit
+ @ expects resume addr in r10
+
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov r2,#kSVSPunt @ r2<- interpreter entry point
+ mov r3, #0
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov rPC, r0 @ set up dalvik pc
+ EXPORT_PC()
+ str lr, [rSELF,#offThread_jitResumeNPC]
+ str sp, [rSELF,#offThread_jitResumeNSP]
+ str r1, [rSELF,#offThread_jitResumeDPC]
+ mov r2,#kSVSSingleStep @ r2<- interpreter entry point
+ b jitSVShadowRunEnd @ doesn't return
+
+
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoProfile @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ ldr r0,[lr, #-1] @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+ ldr r0,[lr, #-1] @ pass our target PC
+ mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr r0,[lr, #-1] @ pass our target PC
+ mov r2,#kSVSNormal @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoChain @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+#else
+
+/*
+ * "longjmp" to a translation after single-stepping.
+ */
+ .global dvmJitResumeTranslation @ (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+ mov rSELF, r0 @ restore self
+ mov rPC, r1 @ restore Dalvik pc
+ mov rFP, r2 @ restore Dalvik fp
+ ldr r0, [rSELF,#offThread_jitResumeNPC]
+ mov r2, #0
+ str r2, [rSELF,#offThread_jitResumeNPC] @ reset resume address
+ ldr sp, [rSELF,#offThread_jitResumeNSP] @ cut back native stack
+ bx r0 @ resume translation
+
+/*
+ * Return from the translation cache to the interpreter when the compiler is
+ * having issues translating/executing a Dalvik instruction. We have to skip
+ * the code cache lookup; otherwise it is possible to indefinitely bounce
+ * between the interpreter and the code cache if the instruction that fails
+ * to be compiled happens to be at a trace start.
+ */
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov rPC, r0
+#if defined(WITH_JIT_TUNING)
+ mov r0,lr
+ bl dvmBumpPunt;
+#endif
+ EXPORT_PC()
+ mov r0, #0
+ str r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+
+/*
+ * Return to the interpreter to handle a single instruction.
+ * We'll use the normal single-stepping mechanism via interpBreak,
+ * but also save the native pc of the resume point in the translation
+ * and the native sp so that we can later do the equivalent of a
+ * longjmp() to resume.
+ * On entry:
+ * dPC <= Dalvik PC of instruction to interpret
+ * lr <= resume point in translation
+ * r1 <= Dalvik PC of next instruction
+ */
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov rPC, r0 @ set up dalvik pc
+ EXPORT_PC()
+ str lr, [rSELF,#offThread_jitResumeNPC]
+ str sp, [rSELF,#offThread_jitResumeNSP]
+ str r1, [rSELF,#offThread_jitResumeDPC]
+ mov r1, #1
+ str r1, [rSELF,#offThread_singleStepCount] @ just step once
+ mov r0, rSELF
+ mov r1, #kSubModeCountedStep
+ bl dvmEnableSubMode @ (self, newMode)
+ ldr rIBASE, [rSELF,#offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used for callees.
+ */
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNoChain
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0 @ !0 means translation exists
+ bxne r0 @ continue native execution if so
+ b 2f @ branch over to use the interpreter
+
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used following
+ * invokes.
+ */
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ ldr rPC,[lr, #-1] @ get our target PC
+ add rINST,lr,#-5 @ save start of chain branch
+ add rINST, #-4 @ .. which is 9 bytes back
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ cmp r0,#0
+ beq 2f
+ mov r1,rINST
+ bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0 @ successful chain?
+ bxne r0 @ continue native execution
+ b toInterpreter @ didn't chain - resume with interpreter
+
+/* No translation, so request one if profiling isn't disabled */
+2:
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ FETCH_INST()
+ cmp r0, #0
+ movne r2,#kJitTSelectRequestHot @ ask for trace selection
+ bne common_selectTrace
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+
+/*
+ * Return from the translation cache to the interpreter.
+ * The return was done with a BLX from thumb mode, and
+ * the following 32-bit word contains the target rPC value.
+ * Note that lr (r14) will have its low-order bit set to denote
+ * its thumb-mode origin.
+ *
+ * We'll need to stash our lr origin away, recover the new
+ * target and then check to see if there is a translation available
+ * for our new target. If so, we do a translation chain and
+ * go back to native execution. Otherwise, it's back to the
+ * interpreter (after treating this entry as a potential
+ * trace start).
+ */
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr rPC,[lr, #-1] @ get our target PC
+ add rINST,lr,#-5 @ save start of chain branch
+ add rINST,#-4 @ .. which is 9 bytes back
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNormal
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ cmp r0,#0
+ beq toInterpreter @ go if not, otherwise do chain
+ mov r1,rINST
+ bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0 @ successful chain?
+ bxne r0 @ continue native execution
+ b toInterpreter @ didn't chain - resume with interpreter
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNoChain
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0
+ bxne r0 @ continue native execution if so
+ EXPORT_PC()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNoChain
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0
+ bxne r0 @ continue native execution if so
+#endif
+
+/*
+ * No translation, restore interpreter regs and start interpreting.
+ * rSELF & rFP were preserved in the translated code, and rPC has
+ * already been restored by the time we get here. We'll need to set
+ * up rIBASE & rINST, and load the address of the JitTable into r0.
+ */
+toInterpreter:
+ EXPORT_PC()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ @ NOTE: intended fallthrough
+
+/*
+ * Similar to common_updateProfile, but tests for null pJitProfTable
+ * r0 holds pJitProfTable, rINST is loaded, rPC is current and
+ * rIBASE has been recently refreshed.
+ */
+common_testUpdateProfile:
+ cmp r0, #0 @ JIT switched off?
+ beq 4f @ return to interp if so
+
+/*
+ * Common code to update potential trace start counter, and initiate
+ * a trace-build if appropriate.
+ * On entry here:
+ * r0 <= pJitProfTable (verified non-NULL)
+ * rPC <= Dalvik PC
+ * rINST <= next instruction
+ */
+common_updateProfile:
+ eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function
+ lsl r3,r3,#(32 - JIT_PROF_SIZE_LOG_2) @ shift out excess bits
+ ldrb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
+ GET_INST_OPCODE(ip)
+ subs r1,r1,#1 @ decrement counter
+ strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
+ GOTO_OPCODE_IFNE(ip) @ if not at threshold, jump to opcode; otherwise fall through
+
+ /* Looks good, reset the counter */
+ ldr r1, [rSELF, #offThread_jitThreshold]
+ strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
+ EXPORT_PC()
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0
+#if !defined(WITH_SELF_VERIFICATION)
+ bxne r0 @ jump to the translation
+ mov r2,#kJitTSelectRequest @ ask for trace selection
+ @ fall-through to common_selectTrace
+#else
+ moveq r2,#kJitTSelectRequest @ ask for trace selection
+ beq common_selectTrace
+ /*
+ * At this point, we have a target translation. However, if
+ * that translation is actually the interpret-only pseudo-translation
+ * we want to treat it the same as no translation.
+ */
+ mov r10, r0 @ save target
+ bl dvmCompilerGetInterpretTemplate
+ cmp r0, r10 @ special case?
+ bne jitSVShadowRunStart @ set up self verification shadow space
+ @ Need to clear the inJitCodeCache flag
+ mov r3, #0 @ 0 means not in the JIT code cache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+ /* no return */
+#endif
+
+/*
+ * On entry:
+ * r2 is jit state.
+ */
+common_selectTrace:
+ ldrh r0,[rSELF,#offThread_subMode]
+ ands r0, #(kSubModeJitTraceBuild | kSubModeJitSV)
+ bne 3f @ already doing JIT work, continue
+ str r2,[rSELF,#offThread_jitState]
+ mov r0, rSELF
+/*
+ * Call out to validate trace-building request. If successful,
+ * rIBASE will be swapped to send us into single-stepping trace
+ * building mode, so we need to refresh before we continue.
+ */
+ EXPORT_PC()
+ SAVE_PC_FP_TO_SELF() @ copy of pc/fp to Thread
+ bl dvmJitCheckTraceRequest
+3:
+ FETCH_INST()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+4:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip)
+ /* no return */
+#endif
+
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ * On entry:
+ * rPC, rFP, rSELF: the values that they should contain
+ * r10: the address of the target translation.
+ */
+jitSVShadowRunStart:
+ mov r0,rPC @ r0<- program counter
+ mov r1,rFP @ r1<- frame pointer
+ mov r2,rSELF @ r2<- self (Thread) pointer
+ mov r3,r10 @ r3<- target translation
+ bl dvmSelfVerificationSaveState @ save registers to shadow space
+ ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
+ bx r10 @ jump to the translation
+
+/*
+ * Restore PC, registers, and interpreter state to original values
+ * before jumping back to the interpreter.
+ * On entry:
+ * r0: dPC
+ * r2: self verification state
+ */
+jitSVShadowRunEnd:
+ mov r1,rFP @ pass ending fp
+ mov r3,rSELF @ pass self ptr for convenience
+ bl dvmSelfVerificationRestoreState @ restore pc and fp values
+ LOAD_PC_FP_FROM_SELF() @ restore pc, fp
+ ldr r1,[r0,#offShadowSpace_svState] @ get self verification state
+ cmp r1,#0 @ check for punt condition
+ beq 1f
+ @ Set up SV single-stepping
+ mov r0, rSELF
+ mov r1, #kSubModeJitSV
+ bl dvmEnableSubMode @ (self, subMode)
+ mov r2,#kJitSelfVerification @ ask for self verification
+ str r2,[rSELF,#offThread_jitState]
+ @ intentional fallthrough
+1: @ exit to interpreter without check
+ EXPORT_PC()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#endif
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ * It will end this interpreter activation, and return to the caller
+ * of dvmMterpStdRun.
+ *
+ * State registers will be saved to the "thread" area before bailing, for
+ * debugging purposes.
+ */
+common_gotoBail:
+ SAVE_PC_FP_TO_SELF() @ export state to "thread"
+ mov r0, rSELF @ r0<- self ptr
+ b dvmMterpStdBail @ call(self, changeInterp)
+
+/*
+ * The JIT's invoke method needs to remember the callsite class and
+ * target pair. Save them here so that they are available to
+ * dvmCheckJit following the interpretation of this invoke.
+ */
+#if defined(WITH_JIT)
+save_callsiteinfo:
+ cmp r9, #0
+ ldrne r9, [r9, #offObject_clazz]
+ str r0, [rSELF, #offThread_methodToCall]
+ str r9, [rSELF, #offThread_callsiteClass]
+ bx lr
+#endif
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", r9 is "this"
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+#if defined(WITH_JIT)
+ ldrh r1, [rSELF, #offThread_subMode]
+ ands r1, #kSubModeJitTraceBuild
+ blne save_callsiteinfo
+#endif
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ beq .LinvokeArgsDone @ if no args, skip the rest
+ FETCH(r1, 2) @ r1<- CCCC
+
+.LinvokeRangeArgs:
+ @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
+ @ (very few methods have > 10 args; could unroll for common cases)
+ add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC]
+ sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args
+1: ldr r1, [r3], #4 @ val = *fp++
+ subs r2, r2, #1 @ count--
+ str r1, [r10], #4 @ *outs++ = val
+ bne 1b @ ...while count != 0
+ b .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", r9 is "this"
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+#if defined(WITH_JIT)
+ ldrh r1, [rSELF, #offThread_subMode]
+ ands r1, #kSubModeJitTraceBuild
+ blne save_callsiteinfo
+#endif
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ FETCH(r1, 2) @ r1<- GFED (load here to hide latency)
+ beq .LinvokeArgsDone
+
+ @ r0=methodToCall, r1=GFED, r2=count, r10=outs
+.LinvokeNonRange:
+ rsb r2, r2, #5 @ r2<- 5-r2
+ add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+5: and ip, rINST, #0x0f00 @ isolate A
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2)
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vA
+4: and ip, r1, #0xf000 @ isolate G
+ ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2)
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vG
+3: and ip, r1, #0x0f00 @ isolate F
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vF
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vF
+2: and ip, r1, #0x00f0 @ isolate E
+ ldr r2, [rFP, ip, lsr #2] @ r2<- vE
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vE
+1: and ip, r1, #0x000f @ isolate D
+ ldr r2, [rFP, ip, lsl #2] @ r2<- vD
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vD
+0: @ fall through to .LinvokeArgsDone
+
+.LinvokeArgsDone: @ r0=methodToCall
+ ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize
+ ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize
+ ldr r2, [r0, #offMethod_insns] @ r2<- method->insns
+ ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz
+ @ find space for the new stack frame, check for overflow
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea
+@ bl common_dumpRegs
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize)
+ cmp r3, r9 @ bottom < interpStackEnd?
+ ldrh lr, [rSELF, #offThread_subMode]
+ ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
+ blo .LstackOverflow @ yes, this frame will overflow stack
+
+ @ set up newSaveArea
+#ifdef EASY_GDB
+ SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area
+ str ip, [r10, #offStackSaveArea_prevSave]
+#endif
+ str rFP, [r10, #offStackSaveArea_prevFrame]
+ str rPC, [r10, #offStackSaveArea_savedPc]
+#if defined(WITH_JIT)
+ mov r9, #0
+ str r9, [r10, #offStackSaveArea_returnAddr]
+#endif
+ str r0, [r10, #offStackSaveArea_method]
+
+ @ Profiling?
+ cmp lr, #0 @ any special modes happening?
+ bne 2f @ go if so
+1:
+ tst r3, #ACC_NATIVE
+ bne .LinvokeNative
+
+ /*
+ stmfd sp!, {r0-r3}
+ bl common_printNewline
+ mov r0, rFP
+ mov r1, #0
+ bl dvmDumpFp
+ ldmfd sp!, {r0-r3}
+ stmfd sp!, {r0-r3}
+ mov r0, r1
+ mov r1, r10
+ bl dvmDumpFp
+ bl common_printNewline
+ ldmfd sp!, {r0-r3}
+ */
+
+ ldrh r9, [r2] @ r9 <- load INST from new PC
+ ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+ mov rPC, r2 @ publish new rPC
+
+ @ Update state values for the new method
+ @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov r2, #1
+ str r2, [rSELF, #offThread_debugIsMethodEntry]
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ mov rFP, r1 @ fp = newFp
+ GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
+ mov rINST, r9 @ publish new rINST
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ cmp r0,#0
+ bne common_updateProfile
+ GOTO_OPCODE(ip) @ jump to next instruction
+#else
+ mov rFP, r1 @ fp = newFp
+ GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
+ mov rINST, r9 @ publish new rINST
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+2:
+ @ Profiling - record method entry. r0: methodToCall
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ mov r1, r0
+ mov r0, rSELF
+ bl dvmReportInvoke @ (self, method)
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+ b 1b
+
+.LinvokeNative:
+ @ Prep for the native call
+ @ r0=methodToCall, r1=newFp, r10=newSaveArea
+ ldrh lr, [rSELF, #offThread_subMode]
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
+ mov r2, r0 @ r2<- methodToCall
+ mov r0, r1 @ r0<- newFp (points to args)
+ add r1, rSELF, #offThread_retval @ r1<- &retval
+ mov r3, rSELF @ arg3<- self
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ b .Lskip
+ .type dalvik_mterp, %function
+dalvik_mterp:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+.Lskip:
+#endif
+
+ cmp lr, #0 @ any special SubModes active?
+ bne 11f @ go handle them if so
+ ldr ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+ blx ip
+7:
+
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ bne common_exceptionThrown @ no, handle exception
+
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+11:
+ @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
+ stmfd sp!, {r0-r3} @ save all but subModes
+ mov r0, r2 @ r0<- methodToCall
+ mov r1, rSELF
+ mov r2, rFP
+ bl dvmReportPreNativeInvoke @ (methodToCall, self, fp)
+ ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
+
+ @ Call the native method
+ ldr ip, [r2, #offMethod_nativeFunc] @ ip<- methodToCall->nativeFunc
+ blx ip
+
+ @ Restore the pre-call arguments
+ ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
+
+ @ Finish up any post-invoke subMode requirements
+ mov r0, r2 @ r0<- methodToCall
+ mov r1, rSELF
+ mov r2, rFP
+ bl dvmReportPostNativeInvoke @ (methodToCall, self, fp)
+ b 7b @ resume
+
+.LstackOverflow: @ r0=methodToCall
+ mov r1, r0 @ r1<- methodToCall
+ mov r0, rSELF @ r0<- self
+ bl dvmHandleStackOverflow
+ b common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+ .fnend
+ .size dalvik_mterp, .-dalvik_mterp
+#endif
+
+
+ /*
+ * Common code for method invocation, calling through "glue code".
+ *
+ * TODO: now that we have range and non-range invoke handlers, this
+ * needs to be split into two. Maybe just create entry points
+ * that set r9 and jump here?
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ * r9 is "bool methodCallRange", indicating if this is a /range variant
+ */
+ .if 0
+.LinvokeOld:
+ sub sp, sp, #8 @ space for args + pad
+ FETCH(ip, 2) @ ip<- FEDC or CCCC
+ mov r2, r0 @ A2<- methodToCall
+ mov r0, rSELF @ A0<- self
+ SAVE_PC_FP_TO_SELF() @ export state to "self"
+ mov r1, r9 @ A1<- methodCallRange
+ mov r3, rINST, lsr #8 @ A3<- AA
+ str ip, [sp, #0] @ A4<- ip
+ bl dvmMterp_invokeMethod @ call the C invokeMethod
+ add sp, sp, #8 @ remove arg area
+ b common_resumeAfterGlueCall @ continue to next instruction
+ .endif
+
+
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+ ldrh lr, [rSELF, #offThread_subMode]
+ SAVEAREA_FROM_FP(r0, rFP)
+ ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
+ cmp lr, #0 @ any special subMode handling needed?
+ bne 19f
+14:
+ ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
+ ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ is this a break frame?
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ beq 15f
+ ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+15:
+#else
+ ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+#endif
+ beq common_gotoBail @ break frame, bail out completely
+
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
+ str r2, [rSELF, #offThread_method]@ self->method = newSave->method
+ ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+#if defined(WITH_JIT)
+ ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
+ mov rPC, r9 @ publish new rPC
+ str r1, [rSELF, #offThread_methodClassDex]
+ str r10, [rSELF, #offThread_inJitCodeCache] @ may return to JIT'ed land
+ cmp r10, #0 @ caller is compiled code
+ blxne r10
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+#else
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ mov rPC, r9 @ publish new rPC
+ str r1, [rSELF, #offThread_methodClassDex]
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+19:
+ @ Handle special actions
+ @ On entry, r0: StackSaveArea
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_curFrame] @ update interpSave.curFrame
+ mov r0, rSELF
+ bl dvmReportReturn @ (self)
+ SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
+ b 14b @ continue
+
+ /*
+ * Return handling, calls through "glue code".
+ */
+ .if 0
+.LreturnOld:
+ SAVE_PC_FP_TO_SELF() @ export state
+ mov r0, rSELF @ arg to function
+ bl dvmMterp_returnFromMethod
+ b common_resumeAfterGlueCall
+ .endif
+
+
+/*
+ * Somebody has thrown an exception. Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+ .global dvmMterpCommonExceptionThrown
+dvmMterpCommonExceptionThrown:
+common_exceptionThrown:
+.LexceptionNew:
+
+ EXPORT_PC()
+
+ mov r0, rSELF
+ bl dvmCheckSuspendPending
+
+ ldr r9, [rSELF, #offThread_exception] @ r9<- self->exception
+ mov r1, rSELF @ r1<- self
+ mov r0, r9 @ r0<- exception
+ bl dvmAddTrackedAlloc @ don't let the exception be GCed
+ ldrh r2, [rSELF, #offThread_subMode] @ get subMode flags
+ mov r3, #0 @ r3<- NULL
+ str r3, [rSELF, #offThread_exception] @ self->exception = NULL
+
+ @ Special subMode?
+ cmp r2, #0 @ any special subMode handling needed?
+ bne 7f @ go if so
+8:
+ /* set up args and a local for "&fp" */
+ /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
+ str rFP, [sp, #-4]! @ *--sp = fp
+ mov ip, sp @ ip<- &fp
+ mov r3, #0 @ r3<- false
+ str ip, [sp, #-4]! @ *--sp = &fp
+ ldr r1, [rSELF, #offThread_method] @ r1<- self->method
+ mov r0, rSELF @ r0<- self
+ ldr r1, [r1, #offMethod_insns] @ r1<- method->insns
+ ldrh lr, [rSELF, #offThread_subMode] @ lr<- subMode flags
+ mov r2, r9 @ r2<- exception
+ sub r1, rPC, r1 @ r1<- pc - method->insns
+ mov r1, r1, asr #1 @ r1<- offset in code units
+
+ /* call, r0 gets catchRelPc (a code-unit offset) */
+ bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp)
+
+ /* fix earlier stack overflow if necessary; may trash rFP */
+ ldrb r1, [rSELF, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ beq 1f @ no, skip ahead
+ mov rFP, r0 @ save relPc result in rFP
+ mov r0, rSELF @ r0<- self
+ mov r1, r9 @ r1<- exception
+ bl dvmCleanupStackOverflow @ call(self, exception)
+ mov r0, rFP @ restore result
+1:
+
+ /* update frame pointer and check result from dvmFindCatchBlock */
+ ldr rFP, [sp, #4] @ retrieve the updated rFP
+ cmp r0, #0 @ is catchRelPc < 0?
+ add sp, sp, #8 @ restore stack
+ bmi .LnotCaughtLocally
+
+ /* adjust locals to match self->interpSave.curFrame and updated PC */
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area
+ ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method
+ str r1, [rSELF, #offThread_method] @ self->method = new method
+ ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz
+ ldr r3, [r1, #offMethod_insns] @ r3<- method->insns
+ ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
+ add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc
+ str r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...
+
+ /* release the tracked alloc on the exception */
+ mov r0, r9 @ r0<- exception
+ mov r1, rSELF @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+
+ /* restore the exception if the handler wants it */
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"?
+ streq r9, [rSELF, #offThread_exception] @ yes, restore the exception
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ @ Manage debugger bookkeeping
+7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_curFrame] @ update interpSave.curFrame
+ mov r0, rSELF @ arg0<- self
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
+ b 8b @ resume with normal handling
+
+.LnotCaughtLocally: @ r9=exception
+ /* fix stack overflow if necessary */
+ ldrb r1, [rSELF, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ movne r0, rSELF @ if yes: r0<- self
+ movne r1, r9 @ if yes: r1<- exception
+ blne dvmCleanupStackOverflow @ if yes: call(self, exception)
+
+ @ may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+ /* call __android_log_print(prio, tag, format, ...) */
+ /* "Exception %s from %s:%d not caught locally" */
+ @ dvmLineNumFromPC(method, pc - method->insns)
+ ldr r0, [rSELF, #offThread_method]
+ ldr r1, [r0, #offMethod_insns]
+ sub r1, rPC, r1
+ asr r1, r1, #1
+ bl dvmLineNumFromPC
+ str r0, [sp, #-4]!
+ @ dvmGetMethodSourceFile(method)
+ ldr r0, [rSELF, #offThread_method]
+ bl dvmGetMethodSourceFile
+ str r0, [sp, #-4]!
+ @ exception->clazz->descriptor
+ ldr r3, [r9, #offObject_clazz]
+ ldr r3, [r3, #offClassObject_descriptor]
+ @
+ ldr r2, strExceptionNotCaughtLocally
+ ldr r1, strLogTag
+ mov r0, #3 @ LOG_DEBUG
+ bl __android_log_print
+#endif
+ str r9, [rSELF, #offThread_exception] @ restore exception
+ mov r0, r9 @ r0<- exception
+ mov r1, rSELF @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+ b common_gotoBail @ bail out
+
+
+ /*
+ * Exception handling, calls through "glue code".
+ */
+ .if 0
+.LexceptionOld:
+ SAVE_PC_FP_TO_SELF() @ export state
+ mov r0, rSELF @ arg to function
+ bl dvmMterp_exceptionThrown
+ b common_resumeAfterGlueCall
+ .endif
+
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including the current
+ * instruction.
+ *
+ * On entry:
+ * r10: &dvmDex->pResFields[field]
+ * r0: field pointer (must preserve)
+ */
+common_verifyField:
+ ldrh r3, [rSELF, #offThread_subMode] @ r3 <- submode byte
+ ands r3, #kSubModeJitTraceBuild
+ bxeq lr @ Not building trace, continue
+ ldr r1, [r10] @ r1<- reload resolved StaticField ptr
+ cmp r1, #0 @ resolution complete?
+ bxne lr @ yes, continue
+ stmfd sp!, {r0-r2,lr} @ save regs
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self,pc) end trace before this inst
+ ldmfd sp!, {r0-r2, lr}
+ bx lr @ return
+#endif
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+ LOAD_PC_FP_FROM_SELF() @ pull rPC and rFP out of thread
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/*
+ * Invalid array index. Note that our calling convention is strange; we use r1
+ * and r3 because those just happen to be the registers all our callers are
+ * using. We move r3 before calling the C function, but r1 happens to match.
+ * r1: index
+ * r3: size
+ */
+common_errArrayIndex:
+ EXPORT_PC()
+ mov r0, r3
+ bl dvmThrowArrayIndexOutOfBoundsException
+ b common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+ ldr r0, strDivideByZero
+ bl dvmThrowArithmeticException
+ b common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ * On entry: length in r1
+ */
+common_errNegativeArraySize:
+ EXPORT_PC()
+ mov r0, r1 @ arg0 <- len
+ bl dvmThrowNegativeArraySizeException @ (len)
+ b common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ * On entry: method name in r1
+ */
+common_errNoSuchMethod:
+ EXPORT_PC()
+ mov r0, r1
+ bl dvmThrowNoSuchMethodError
+ b common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one. We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+ EXPORT_PC()
+ mov r0, #0
+ bl dvmThrowNullPointerException
+ b common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault. The source address will
+ * be in lr (use a bl instruction to jump here).
+ */
+common_abort:
+ ldr pc, .LdeadFood
+.LdeadFood:
+ .word 0xdeadf00d
+
+/*
+ * Spit out a "we were here", preserving all registers. (The attempt
+ * to save ip won't work, but we need to save an even number of
+ * registers for EABI 64-bit stack alignment.)
+ */
+ .macro SQUEAK num
+common_squeak\num:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strSqueak
+ mov r1, #\num
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endm
+
+ SQUEAK 0
+ SQUEAK 1
+ SQUEAK 2
+ SQUEAK 3
+ SQUEAK 4
+ SQUEAK 5
+
+/*
+ * Spit out the number in r0, preserving registers.
+ */
+common_printNum:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strSqueak
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strNewline
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+ /*
+ * Print the 32-bit quantity in r0 as a hex value, preserving registers.
+ */
+common_printHex:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strPrintHex
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print the 64-bit quantity in r0-r1, preserving registers.
+ */
+common_printLong:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r3, r1
+ mov r2, r0
+ ldr r0, strPrintLong
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print full method info. Pass the Method* in r0. Preserves regs.
+ */
+common_printMethod:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpPrintMethod
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info. Requires the C function to be compiled in.
+ */
+ .if 0
+common_dumpRegs:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpDumpArmRegs
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endif
+
+#if 0
+/*
+ * Experiment on VFP mode.
+ *
+ * uint32_t setFPSCR(uint32_t val, uint32_t mask)
+ *
+ * Updates the bits specified by "mask", setting them to the values in "val".
+ */
+setFPSCR:
+ and r0, r0, r1 @ make sure no stray bits are set
+ fmrx r2, fpscr @ get VFP reg
+ mvn r1, r1 @ bit-invert mask
+ and r2, r2, r1 @ clear masked bits
+ orr r2, r2, r0 @ set specified bits
+ fmxr fpscr, r2 @ set VFP reg
+ mov r0, r2 @ return new value
+ bx lr
+
+ .align 2
+ .global dvmConfigureFP
+ .type dvmConfigureFP, %function
+dvmConfigureFP:
+ stmfd sp!, {ip, lr}
+ /* 0x03000000 sets DN/FZ */
+ /* 0x00009f00 clears the six exception enable flags */
+ bl common_squeak0
+ mov r0, #0x03000000 @ r0<- 0x03000000
+ add r1, r0, #0x9f00 @ r1<- 0x03009f00
+ bl setFPSCR
+ ldmfd sp!, {ip, pc}
+#endif
+
+
+/*
+ * String references, must be close to the code that uses them.
+ */
+ .align 2
+strDivideByZero:
+ .word .LstrDivideByZero
+strLogTag:
+ .word .LstrLogTag
+strExceptionNotCaughtLocally:
+ .word .LstrExceptionNotCaughtLocally
+
+strNewline:
+ .word .LstrNewline
+strSqueak:
+ .word .LstrSqueak
+strPrintHex:
+ .word .LstrPrintHex
+strPrintLong:
+ .word .LstrPrintLong
+
+/*
+ * Zero-terminated ASCII string data.
+ *
+ * On ARM we have two choices: do like gcc does, and LDR from a .word
+ * with the address, or use an ADR pseudo-op to get the address
+ * directly. ADR saves 4 bytes and an indirection, but it's using a
+ * PC-relative addressing mode and hence has a limited range, which
+ * makes it not work well with mergeable string sections.
+ */
+ .section .rodata.str1.4,"aMS",%progbits,1
+
+.LstrBadEntryPoint:
+ .asciz "Bad entry point %d\n"
+.LstrFilledNewArrayNotImpl:
+ .asciz "filled-new-array only implemented for objects and 'int'"
+.LstrDivideByZero:
+ .asciz "divide by zero"
+.LstrLogTag:
+ .asciz "mterp"
+.LstrExceptionNotCaughtLocally:
+ .asciz "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+ .asciz "\n"
+.LstrSqueak:
+ .asciz "<%d>"
+.LstrPrintHex:
+ .asciz "<%#x>"
+.LstrPrintLong:
+ .asciz "<%lld>"
+
diff --git a/vm/mterp/out/InterpAsm-armv6j.S b/vm/mterp/out/InterpAsm-armv6j.S
new file mode 100644
index 000000000..642ebbddc
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-armv6j.S
@@ -0,0 +1,17324 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv6j'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: armv5te/header.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * ARMv5 definitions and declarations.
+ */
+
+/*
+ARM EABI general notes:
+
+r0-r3 hold first 4 args to a method; they are not preserved across method calls
+r4-r8 are available for general use
+r9 is given special treatment in some situations, but not for us
+r10 (sl) seems to be generally available
+r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
+r12 (ip) is scratch -- not preserved across method calls
+r13 (sp) should be managed carefully in case a signal arrives
+r14 (lr) must be preserved
+r15 (pc) can be tinkered with directly
+
+r0 holds returns of <= 4 bytes
+r0-r1 hold returns of 8 bytes, low word in r0
+
+Callee must save/restore r4+ (except r12) if it modifies them. If VFP
+is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
+s0-s15 (d0-d7, q0-q3) do not need to be.
+
+Stack is "full descending". Only the arguments that don't fit in the first 4
+registers are placed on the stack. "sp" points at the first stacked argument
+(i.e. the 5th arg).
+
+VFP: single-precision results in s0, double-precision results in d0.
+
+In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
+64-bit quantities (long long, double) must be 64-bit aligned.
+*/
+
+/*
+Mterp and ARM notes:
+
+The following registers have fixed assignments:
+
+ reg nick purpose
+ r4 rPC interpreted program counter, used for fetching instructions
+ r5 rFP interpreted frame pointer, used for accessing locals and args
+ r6 rSELF self (Thread) pointer
+ r7 rINST first 16-bit code unit of current instruction
+ r8 rIBASE interpreted instruction base pointer, used for computed goto
+
+Macros are provided for common operations. Each macro MUST emit only
+one instruction to make instruction-counting easier. They MUST NOT alter
+unspecified registers or condition codes.
+*/
+
+/* single-purpose registers, given names for clarity */
+#define rPC r4
+#define rFP r5
+#define rSELF r6
+#define rINST r7
+#define rIBASE r8
+
+/* save/restore the PC and/or FP from the thread struct */
+#define LOAD_PC_FROM_SELF() ldr rPC, [rSELF, #offThread_pc]
+#define SAVE_PC_TO_SELF() str rPC, [rSELF, #offThread_pc]
+#define LOAD_FP_FROM_SELF() ldr rFP, [rSELF, #offThread_curFrame]
+#define SAVE_FP_TO_SELF() str rFP, [rSELF, #offThread_curFrame]
+#define LOAD_PC_FP_FROM_SELF() ldmia rSELF, {rPC, rFP}
+#define SAVE_PC_FP_TO_SELF() stmia rSELF, {rPC, rFP}
+
+/*
+ * "export" the PC to the stack frame, f/b/o future exception objects. Must
+ * be done *before* something throws.
+ *
+ * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
+ * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
+ *
+ * It's okay to do this more than once.
+ */
+#define EXPORT_PC() \
+ str rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]
+
+/*
+ * Given a frame pointer, find the stack save area.
+ *
+ * In C this is "((StackSaveArea*)(_fp) -1)".
+ */
+#define SAVEAREA_FROM_FP(_reg, _fpreg) \
+ sub _reg, _fpreg, #sizeofStackSaveArea
+
+/*
+ * Fetch the next instruction from rPC into rINST. Does not advance rPC.
+ */
+#define FETCH_INST() ldrh rINST, [rPC]
+
+/*
+ * Fetch the next instruction from the specified offset. Advances rPC
+ * to point to the next instruction. "_count" is in 16-bit code units.
+ *
+ * Because of the limited size of immediate constants on ARM, this is only
+ * suitable for small forward movements (i.e. don't try to implement "goto"
+ * with this).
+ *
+ * This must come AFTER anything that can throw an exception, or the
+ * exception catch may miss. (This also implies that it must come after
+ * EXPORT_PC().)
+ */
+#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #((_count)*2)]!
+
+/*
+ * The operation performed here is similar to FETCH_ADVANCE_INST, except the
+ * src and dest registers are parameterized (not hard-wired to rPC and rINST).
+ */
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+ ldrh _dreg, [_sreg, #((_count)*2)]!
+
+/*
+ * Fetch the next instruction from an offset specified by _reg. Updates
+ * rPC to point to the next instruction. "_reg" must specify the distance
+ * in bytes, *not* 16-bit code units, and may be a signed value.
+ *
+ * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
+ * bits that hold the shift distance are used for the half/byte/sign flags.
+ * In some cases we can pre-double _reg for free, so we require a byte offset
+ * here.
+ */
+#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!
+
+/*
+ * Fetch a half-word code unit from an offset past the current PC. The
+ * "_count" value is in 16-bit code units. Does not advance rPC.
+ *
+ * The "_S" variant works the same but treats the value as signed.
+ */
+#define FETCH(_reg, _count) ldrh _reg, [rPC, #((_count)*2)]
+#define FETCH_S(_reg, _count) ldrsh _reg, [rPC, #((_count)*2)]
+
+/*
+ * Fetch one byte from an offset past the current PC. Pass in the same
+ * "_count" as you would for FETCH, and an additional 0/1 indicating which
+ * byte of the halfword you want (lo/hi).
+ */
+#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #((_count)*2+(_byte))]
+
+/*
+ * Put the instruction's opcode field into the specified register.
+ */
+#define GET_INST_OPCODE(_reg) and _reg, rINST, #255
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+#define GET_PREFETCHED_OPCODE(_oreg, _ireg) and _oreg, _ireg, #255
+
+/*
+ * Begin executing the opcode in _reg. Because this only jumps within the
+ * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
+ */
+#define GOTO_OPCODE(_reg) add pc, rIBASE, _reg, lsl #6
+#define GOTO_OPCODE_BASE(_base,_reg) add pc, _base, _reg, lsl #6
+#define GOTO_OPCODE_IFEQ(_reg) addeq pc, rIBASE, _reg, lsl #6
+#define GOTO_OPCODE_IFNE(_reg) addne pc, rIBASE, _reg, lsl #6
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ */
+#define GET_VREG(_reg, _vreg) ldr _reg, [rFP, _vreg, lsl #2]
+#define SET_VREG(_reg, _vreg) str _reg, [rFP, _vreg, lsl #2]
+
+/*
+ * Convert a virtual register index into an address.
+ */
+#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
+ add _reg, rFP, _vreg, lsl #2
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "../common/asm-constants.h"
+
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
+
+/* File: armv5te/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines
+ * ===========================================================================
+ */
+
+/*
+ * Macro for data memory barrier; not meaningful pre-ARMv6K.
+ */
+.macro SMP_DMB
+.endm
+
+/*
+ * Macro for data memory barrier; not meaningful pre-ARMv6K.
+ */
+.macro SMP_DMB_ST
+.endm
+
+/* File: armv5te/entry.S */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+/*
+ * We don't have formal stack frames, so gdb scans upward in the code
+ * to find the start of the function (a label with the %function type),
+ * and then looks at the next few instructions to figure out what
+ * got pushed onto the stack. From this it figures out how to restore
+ * the registers, including PC, for the previous stack frame. If gdb
+ * sees a non-function label, it stops scanning, so either we need to
+ * have nothing but assembler-local labels between the entry point and
+ * the break, or we need to fake it out.
+ *
+ * When this is defined, we add some stuff to make gdb less confused.
+ */
+#define ASSIST_DEBUGGER 1
+
+ .text
+ .align 2
+ .global dvmMterpStdRun
+ .type dvmMterpStdRun, %function
+
+/*
+ * On entry:
+ * r0 Thread* self
+ *
+ * The return comes via a call to dvmMterpStdBail().
+ */
+dvmMterpStdRun:
+#define MTERP_ENTRY1 \
+ .save {r4-r10,fp,lr}; \
+ stmfd sp!, {r4-r10,fp,lr} @ save 9 regs
+#define MTERP_ENTRY2 \
+ .pad #4; \
+ sub sp, sp, #4 @ align 64
+
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+
+ /* save stack pointer, add magic word for debuggerd */
+ str sp, [r0, #offThread_bailPtr] @ save SP for eventual return
+
+ /* set up "named" registers, figure out entry point */
+ mov rSELF, r0 @ set rSELF
+ LOAD_PC_FP_FROM_SELF() @ load rPC and rFP from "thread"
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ set rIBASE
+
+#if defined(WITH_JIT)
+.LentryInstr:
+ /* Entry is always a possible trace start */
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ FETCH_INST()
+ mov r1, #0 @ prepare the value for the new state
+ str r1, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
+ cmp r0,#0 @ is profiling disabled?
+#if !defined(WITH_SELF_VERIFICATION)
+ bne common_updateProfile @ profiling is enabled
+#else
+ ldr r2, [rSELF, #offThread_shadowSpace] @ to find out the jit exit state
+ beq 1f @ profiling is disabled
+ ldr r3, [r2, #offShadowSpace_jitExitState] @ jit exit state
+ cmp r3, #kSVSTraceSelect @ hot trace following?
+ moveq r2,#kJitTSelectRequestHot @ ask for trace selection
+ beq common_selectTrace @ go build the trace
+ cmp r3, #kSVSNoProfile @ don't profile the next instruction?
+ beq 1f @ interpret the next instruction
+ b common_updateProfile @ collect profiles
+#endif
+1:
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#else
+ /* start executing the instruction at rPC */
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+.Lbad_arg:
+ ldr r0, strBadEntryPoint
+ @ r1 holds value of entryPoint
+ bl printf
+ bl dvmAbort
+ .fnend
+ .size dvmMterpStdRun, .-dvmMterpStdRun
+
+
+ .global dvmMterpStdBail
+ .type dvmMterpStdBail, %function
+
+/*
+ * Restore the stack pointer and PC from the save point established on entry.
+ * This is essentially the same as a longjmp, but should be cheaper. The
+ * last instruction causes us to return to whoever called dvmMterpStdRun.
+ *
+ * We pushed some registers on the stack in dvmMterpStdRun, then saved
+ * SP and LR. Here we restore SP, restore the registers, and then restore
+ * LR to PC.
+ *
+ * On entry:
+ * r0 Thread* self
+ */
+dvmMterpStdBail:
+ ldr sp, [r0, #offThread_bailPtr] @ sp<- saved SP
+ add sp, sp, #4 @ un-align 64
+ ldmfd sp!, {r4-r10,fp,pc} @ restore 9 regs and return
+
+
+/*
+ * String references.
+ */
+strBadEntryPoint:
+ .word .LstrBadEntryPoint
+
+
+ .global dvmAsmInstructionStart
+ .type dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+ .text
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOP: /* 0x00 */
+/* File: armv5te/OP_NOP.S */
+ FETCH_ADVANCE_INST(1) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ .type dalvik_inst, %function
+dalvik_inst:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+ .fnend
+#endif
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE: /* 0x01 */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/OP_MOVE_WIDE.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r2, r2, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[B]
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 1) @ r3<- BBBB
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/OP_MOVE_WIDE_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+ FETCH(r3, 2) @ r3<- BBBB
+ FETCH(r2, 1) @ r2<- AAAA
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BBBB]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AAAA]
+ ldmia r3, {r0-r1} @ r0/r1<- fp[BBBB]
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AAAA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/OP_MOVE_OBJECT.S */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15 @ r0<- A
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
+/* File: armv5te/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ mov r0, rINST, lsr #8 @ r0<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/OP_MOVE_OBJECT_16.S */
+/* File: armv5te/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(r1, 2) @ r1<- BBBB
+ FETCH(r0, 1) @ r0<- AAAA
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[BBBB]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r0) @ fp[AAAA]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rSELF, #offThread_retval] @ r0<- self->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
+ /* move-result-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r3, rSELF, #offThread_retval @ r3<- &self->retval
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ ldmia r3, {r0-r1} @ r0/r1<- retval.j
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r2, {r0-r1} @ fp[AA]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
+/* File: armv5te/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r0, [rSELF, #offThread_retval] @ r0<- self->retval.i
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[AA]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/OP_MOVE_EXCEPTION.S */
+ /* move-exception vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ ldr r3, [rSELF, #offThread_exception] @ r3<- dvmGetException bypass
+ mov r1, #0 @ r1<- 0
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ SET_VREG(r3, r2) @ fp[AA]<- exception obj
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r1, [rSELF, #offThread_exception] @ dvmClearException bypass
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/OP_RETURN_VOID.S */
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN: /* 0x0f */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rSELF, #offThread_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/OP_RETURN_WIDE.S */
+ /*
+ * Return a 64-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ */
+ /* return-wide vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[AA]
+ add r3, rSELF, #offThread_retval @ r3<- &self->retval
+ ldmia r2, {r0-r1} @ r0/r1 <- vAA/vAA+1
+ stmia r3, {r0-r1} @ retval<- r0/r1
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/OP_RETURN_OBJECT.S */
+/* File: armv5te/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r0, r2) @ r0<- vAA
+ str r0, [rSELF, #offThread_retval] @ retval.i <- vAA
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_4: /* 0x12 */
+/* File: armv5te/OP_CONST_4.S */
+ /* const/4 vA, #+B */
+ mov r1, rINST, lsl #16 @ r1<- Bxxx0000
+ mov r0, rINST, lsr #8 @ r0<- A+
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r1, asr #28 @ r1<- sssssssB (sign-extended)
+ and r0, r0, #15 @ r0<- A
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r1, r0) @ fp[A]<- r1
+ GOTO_OPCODE(ip) @ execute next instruction
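+ /*
+ * The lsl #16 / asr #28 pair above sign-extends the 4-bit literal B
+ * taken from bits 15:12 of the code unit. For example, for
+ * "const/4 v2, #-1" the code unit is 0xF212: lsl #16 gives 0xF2120000
+ * and asr #28 yields 0xFFFFFFFF, i.e. -1.
+ */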
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_16: /* 0x13 */
+/* File: armv5te/OP_CONST_16.S */
+ /* const/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST: /* 0x14 */
+/* File: armv5te/OP_CONST.S */
+ /* const vAA, #+BBBBbbbb */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/OP_CONST_HIGH16.S */
+ /* const/high16 vAA, #+BBBB0000 */
+ FETCH(r0, 1) @ r0<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, r0, lsl #16 @ r0<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/OP_CONST_WIDE_16.S */
+ /* const-wide/16 vAA, #+BBBB */
+ FETCH_S(r0, 1) @ r0<- ssssBBBB (sign-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
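+ /*
+ * The "asr #31" above replicates the sign bit of the 32-bit literal
+ * into every bit of r1, producing the high word of the sign-extended
+ * 64-bit value stored by the stmia.
+ */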
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/OP_CONST_WIDE_32.S */
+ /* const-wide/32 vAA, #+BBBBbbbb */
+ FETCH(r0, 1) @ r0<- 0000bbbb (low)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_S(r2, 2) @ r2<- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ mov r1, r0, asr #31 @ r1<- ssssssss
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/OP_CONST_WIDE.S */
+ /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (low middle)
+ FETCH(r2, 3) @ r2<- hhhh (high middle)
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
+ FETCH(r3, 4) @ r3<- HHHH (high)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) @ advance rPC, load rINST
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
+ /* const-wide/high16 vAA, #+BBBB000000000000 */
+ FETCH(r1, 1) @ r1<- 0000BBBB (zero-extended)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ mov r0, #0 @ r0<- 00000000
+ mov r1, r1, lsl #16 @ r1<- BBBB0000
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/OP_CONST_STRING.S */
+ /* const/string vAA, String@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- self->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_STRING_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/OP_CONST_STRING_JUMBO.S */
+ /* const/string vAA, String@BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (low)
+ FETCH(r1, 2) @ r1<- BBBB (high)
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- self->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResStrings] @ r2<- dvmDex->pResStrings
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResStrings[BBBBbbbb]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_STRING_JUMBO_resolve
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/OP_CONST_CLASS.S */
+ /* const/class vAA, Class@BBBB */
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- self->methodClassDex
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- dvmDex->pResClasses
+ ldr r0, [r2, r1, lsl #2] @ r0<- pResClasses[BBBB]
+ cmp r0, #0 @ not yet resolved?
+ beq .LOP_CONST_CLASS_resolve
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/OP_MONITOR_ENTER.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ mov r0, rSELF @ r0<- self
+ cmp r1, #0 @ null object?
+ EXPORT_PC() @ need for precise GC
+ beq common_errNullObject @ null object, throw an exception
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl dvmLockObject @ call(self, obj)
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/OP_MONITOR_EXIT.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ EXPORT_PC() @ before fetch: export the PC
+ GET_VREG(r1, r2) @ r1<- vAA (object)
+ cmp r1, #0 @ null object?
+ beq 1f @ yes
+ mov r0, rSELF @ r0<- self
+ bl dvmUnlockObject @ r0<- success for unlock(self, obj)
+ cmp r0, #0 @ failed?
+ FETCH_ADVANCE_INST(1) @ before throw: advance rPC, load rINST
+ beq common_exceptionThrown @ yes, exception is pending
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+1:
+ FETCH_ADVANCE_INST(1) @ advance before throw
+ b common_errNullObject
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/OP_CHECK_CAST.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast vAA, class@BBBB */
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH(r2, 1) @ r2<- BBBB
+ GET_VREG(r9, r3) @ r9<- object
+ ldr r0, [rSELF, #offThread_methodClassDex] @ r0<- pDvmDex
+ cmp r9, #0 @ is object null?
+ ldr r0, [r0, #offDvmDex_pResClasses] @ r0<- pDvmDex->pResClasses
+ beq .LOP_CHECK_CAST_okay @ null obj, cast always succeeds
+ ldr r1, [r0, r2, lsl #2] @ r1<- resolved class
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_CHECK_CAST_resolve @ not resolved, do it now
+.LOP_CHECK_CAST_resolved:
+ cmp r0, r1 @ same class (trivial success)?
+ bne .LOP_CHECK_CAST_fullcheck @ no, do full check
+.LOP_CHECK_CAST_okay:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/OP_INSTANCE_OF.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ /* instance-of vA, vB, class@CCCC */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ and r9, r9, #15 @ r9<- A
+ cmp r0, #0 @ is object null?
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- pDvmDex
+ beq .LOP_INSTANCE_OF_store @ null obj, not an instance, store r0
+ FETCH(r3, 1) @ r3<- CCCC
+ ldr r2, [r2, #offDvmDex_pResClasses] @ r2<- pDvmDex->pResClasses
+ ldr r1, [r2, r3, lsl #2] @ r1<- resolved class
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ cmp r1, #0 @ have we resolved this before?
+ beq .LOP_INSTANCE_OF_resolve @ not resolved, do it now
+.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
+ cmp r0, r1 @ same class (trivial success)?
+ beq .LOP_INSTANCE_OF_trivial @ yes, trivial finish
+ b .LOP_INSTANCE_OF_fullcheck @ no, do full check
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/OP_ARRAY_LENGTH.S */
+ /*
+ * Return the length of an array.
+ */
+ mov r1, rINST, lsr #12 @ r1<- B
+ mov r2, rINST, lsr #8 @ r2<- A+
+ GET_VREG(r0, r1) @ r0<- vB (object ref)
+ and r2, r2, #15 @ r2<- A
+ cmp r0, #0 @ is object null?
+ beq common_errNullObject @ yup, fail
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ ldr r3, [r0, #offArrayObject_length] @ r3<- array length
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r3, r2) @ vB<- length
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/OP_NEW_INSTANCE.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance vAA, class@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+#if defined(WITH_JIT)
+ add r10, r3, r1, lsl #2 @ r10<- &resolved_class
+#endif
+ EXPORT_PC() @ req'd for init, resolve, alloc
+ cmp r0, #0 @ already resolved?
+ beq .LOP_NEW_INSTANCE_resolve @ no, resolve it now
+.LOP_NEW_INSTANCE_resolved: @ r0=class
+ ldrb r1, [r0, #offClassObject_status] @ r1<- ClassStatus enum
+ cmp r1, #CLASS_INITIALIZED @ has class been initialized?
+ bne .LOP_NEW_INSTANCE_needinit @ no, init class now
+.LOP_NEW_INSTANCE_initialized: @ r0=class
+ mov r1, #ALLOC_DONT_TRACK @ flags for alloc call
+ bl dvmAllocObject @ r0<- new object
+ b .LOP_NEW_INSTANCE_finish @ continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/OP_NEW_ARRAY.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ FETCH(r2, 1) @ r2<- CCCC
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ GET_VREG(r1, r0) @ r1<- vB (array length)
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ cmp r1, #0 @ check length
+ ldr r0, [r3, r2, lsl #2] @ r0<- resolved class
+ bmi common_errNegativeArraySize @ negative length, bail - len in r1
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ req'd for resolve, alloc
+ bne .LOP_NEW_ARRAY_finish @ resolved, continue
+ b .LOP_NEW_ARRAY_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_continue @ yes, continue on
+8: ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_continue
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResClasses] @ r3<- pDvmDex->pResClasses
+ EXPORT_PC() @ need for resolve and alloc
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved class
+ mov r10, rINST, lsr #8 @ r10<- AA or BA
+ cmp r0, #0 @ already resolved?
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_continue @ yes, continue on
+8: ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/OP_FILL_ARRAY_DATA.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
+ GET_VREG(r0, r3) @ r0<- vAA (array object)
+ add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
+ EXPORT_PC()
+ bl dvmInterpHandleFillArrayData @ fill the array with predefined data
+ cmp r0, #0 @ 0 means an exception is thrown
+ beq common_exceptionThrown @ has exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_THROW: /* 0x27 */
+/* File: armv5te/OP_THROW.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ mov r2, rINST, lsr #8 @ r2<- AA
+ GET_VREG(r1, r2) @ r1<- vAA (exception object)
+ EXPORT_PC() @ exception handler can throw
+ cmp r1, #0 @ null object?
+ beq common_errNullObject @ yes, throw an NPE instead
+ @ bypass dvmSetException, just store it
+ str r1, [rSELF, #offThread_exception] @ thread->exception<- obj
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO: /* 0x28 */
+/* File: armv5te/OP_GOTO.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ /* tuning: use sbfx for 6t2+ targets */
+ mov r0, rINST, lsl #16 @ r0<- AAxx0000
+ movs r1, r0, asr #24 @ r1<- ssssssAA (sign-extended)
+ add r2, r1, r1 @ r2<- byte offset (flags were set by the movs above)
+ @ If backward branch, refresh rIBASE
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ bmi common_testUpdateProfile @ (r0) check for trace hotness
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
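+ /*
+ * rIBASE is only reloaded from self->curHandlerTable on backward
+ * branches (the N flag from the movs above), so loops notice a pending
+ * handler-table switch (used, for example, to make the thread break out
+ * of the interpreter loop) without adding work to forward branches.
+ * With WITH_JIT, backward branches also funnel into
+ * common_testUpdateProfile so the branch target can become a trace head.
+ */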
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/OP_GOTO_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S(r0, 1) @ r0<- ssssAAAA (sign-extended)
+ adds r1, r0, r0 @ r1<- byte offset, flags set
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ bmi common_testUpdateProfile @ (r0) hot trace head?
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/OP_GOTO_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0". Because
+ * we need the V bit set, we'll use an adds to convert from Dalvik
+ * offset to byte offset.
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH(r0, 1) @ r0<- aaaa (lo)
+ FETCH(r1, 2) @ r1<- AAAA (hi)
+ orr r0, r0, r1, lsl #16 @ r0<- AAAAaaaa
+ adds r1, r0, r0 @ r1<- byte offset
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ ble common_testUpdateProfile @ (r0) hot trace head?
+#else
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * When the JIT is present, all targets are treated as potential trace
+ * heads, regardless of branch direction.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ cmp r0, #0
+ bne common_updateProfile
+#else
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/OP_SPARSE_SWITCH.S */
+/* File: armv5te/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * When the JIT is present, all targets are treated as potential trace
+ * heads, regardless of branch direction.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(r0, 1) @ r0<- bbbb (lo)
+ FETCH(r1, 2) @ r1<- BBBB (hi)
+ mov r3, rINST, lsr #8 @ r3<- AA
+ orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
+ GET_VREG(r1, r3) @ r1<- vAA
+ add r0, rPC, r0, lsl #1 @ r0<- PC + BBBBbbbb*2
+ bl dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
+ adds r1, r0, r0 @ r1<- byte offset; clear V
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+ cmp r0, #0
+ bne common_updateProfile
+#else
+ ldrle rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh handler base
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ copy to arg registers
+ mov r1, r10
+ bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPL_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
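+ /*
+ * Flag mapping after __aeabi_cfcmple, as used above:
+ *   C clear            -> vBB <  vCC -> mvncc stores -1
+ *   Z set              -> vBB == vCC -> moveq stores  0
+ *   C set and Z clear  -> vBB >  vCC or unordered (NaN); bhi goes to
+ *                         .LOP_CMPL_FLOAT_gt_or_nan for the second check.
+ * The cmpg-float and cmp*-double handlers below follow the same pattern.
+ */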
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/OP_CMPG_FLOAT.S */
+/* File: armv5te/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * The straightforward implementation requires 3 calls to functions
+ * that return a result in r0. We can do it with two calls if our
+ * EABI library supports __aeabi_cfcmple (only one if we want to check
+ * for NaN directly):
+ * check x <= y
+ * if <, return -1
+ * if ==, return 0
+ * check y <= x
+ * if <, return 1
+ * return {-1,1}
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ copy to arg registers
+ mov r1, r10
+ bl __aeabi_cfcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPG_FLOAT_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r9, r0, #255 @ r9<- BB
+ mov r10, r0, lsr #8 @ r10<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
+ add r10, rFP, r10, lsl #2 @ r10<- &fp[CC]
+ ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1
+ bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPL_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPL_DOUBLE_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/OP_CMPG_DOUBLE.S */
+/* File: armv5te/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r9, r0, #255 @ r9<- BB
+ mov r10, r0, lsr #8 @ r10<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[BB]
+ add r10, rFP, r10, lsl #2 @ r10<- &fp[CC]
+ ldmia r9, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r10, {r2-r3} @ r2/r3<- vCC/vCC+1
+ bl __aeabi_cdcmple @ cmp <=: C clear if <, Z set if eq
+ bhi .LOP_CMPG_DOUBLE_gt_or_nan @ C set and Z clear, disambiguate
+ mvncc r1, #0 @ (less than) r1<- -1
+ moveq r1, #0 @ (equal) r1<- 0, trumps less than
+.LOP_CMPG_DOUBLE_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/OP_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
+ * register based on the results of the comparison.
+ *
+ * We load the full values with LDM, but in practice many values could
+ * be resolved by only looking at the high word. This could be made
+ * faster or slower by splitting the LDM into a pair of LDRs.
+ *
+ * If we just wanted to set condition flags, we could do this:
+ * subs ip, r0, r2
+ * sbcs ip, r1, r3
+ * subeqs ip, r0, r2
+ * Leaving { <0, 0, >0 } in ip. However, we have to set it to a specific
+ * integer value, which we can do with 2 conditional mov/mvn instructions
+ * (set 1, set -1; if they're equal we already have 0 in ip), giving
+ * us a constant 5-cycle path plus a branch at the end to the
+ * instruction epilogue code. The multi-compare approach below needs
+ * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
+ * in the worst case (the 64-bit values are equal).
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ cmp r1, r3 @ compare (vBB+1, vCC+1)
+ blt .LOP_CMP_LONG_less @ signed compare on high part
+ bgt .LOP_CMP_LONG_greater
+ subs r1, r0, r2 @ r1<- r0 - r2
+ bhi .LOP_CMP_LONG_greater @ unsigned compare on low part
+ bne .LOP_CMP_LONG_less
+ b .LOP_CMP_LONG_finish @ equal; r1 already holds 0
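+ /*
+ * The compare above is done as a signed compare of the high words;
+ * only if they match do we fall through to "subs r1, r0, r2", an
+ * unsigned compare of the low words (the low word carries no sign of
+ * its own). The subs also leaves 0 in r1 for the equal case, which
+ * .LOP_CMP_LONG_finish stores directly.
+ */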
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/OP_IF_EQ.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movne r1, #2 @ r1<- branch dist in code units for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
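+ /*
+ * Note on the branch distance: r1 starts out as the signed branch
+ * offset in code units. When the reverse comparison ("ne" here)
+ * succeeds, i.e. the branch is not taken, movne replaces it with 2,
+ * the width of the if-cmp instruction itself, so the same
+ * FETCH_ADVANCE_INST_RB path handles both the taken and not-taken
+ * cases. The remaining if-* handlers below differ only in the reverse
+ * condition used.
+ */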
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NE: /* 0x33 */
+/* File: armv5te/OP_IF_NE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ moveq r1, #2 @ r1<- branch dist in code units for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LT: /* 0x34 */
+/* File: armv5te/OP_IF_LT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movge r1, #2 @ r1<- branch dist in code units for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GE: /* 0x35 */
+/* File: armv5te/OP_IF_GE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movlt r1, #2 @ r1<- branch dist in code units for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GT: /* 0x36 */
+/* File: armv5te/OP_IF_GT.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movle r1, #2 @ r1<- branch dist in code units for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LE: /* 0x37 */
+/* File: armv5te/OP_IF_LE.S */
+/* File: armv5te/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r3, r1) @ r3<- vB
+ GET_VREG(r2, r0) @ r2<- vA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, r3 @ compare (vA, vB)
+ movgt r1, #2 @ r1<- branch dist in code units for not-taken
+ adds r2, r1, r1 @ convert to bytes, check sign
+ FETCH_ADVANCE_INST_RB(r2) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ cmp r0,#0
+ bne common_updateProfile
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/OP_IF_EQZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movne r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/OP_IF_NEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ moveq r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/OP_IF_LTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movge r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/OP_IF_GEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movlt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/OP_IF_GTZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movle r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/OP_IF_LEZ.S */
+/* File: armv5te/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ mov r0, rINST, lsr #8 @ r0<- AA
+ GET_VREG(r2, r0) @ r2<- vAA
+ FETCH_S(r1, 1) @ r1<- branch offset, in code units
+ cmp r2, #0 @ compare (vA, 0)
+ movgt r1, #2 @ r1<- inst branch dist for not-taken
+ adds r1, r1, r1 @ convert to bytes & set flags
+ FETCH_ADVANCE_INST_RB(r1) @ update rPC, load rINST
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+ cmp r0,#0
+ bne common_updateProfile @ test for JIT off at target
+#else
+ ldrmi rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh table base
+#endif
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/OP_UNUSED_3E.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/OP_UNUSED_3F.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/OP_UNUSED_40.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/OP_UNUSED_41.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/OP_UNUSED_42.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/OP_UNUSED_43.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET: /* 0x44 */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
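+ /*
+ * The bounds check above ("cmp r1, r3" followed by bcs) is an unsigned
+ * comparison of index against length, so a negative index appears as a
+ * very large unsigned value and is rejected by the same single branch
+ * as an index >= length.
+ */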
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/OP_AGET_WIDE.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcc .LOP_AGET_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/OP_AGET_OBJECT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/OP_AGET_BOOLEAN.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/OP_AGET_BYTE.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsb r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/OP_AGET_CHAR.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/OP_AGET_SHORT.S */
+/* File: armv5te/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrsh r2, [r0, #offArrayObject_contents] @ r2<- vBB[vCC]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r2, r9) @ vAA<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT: /* 0x4b */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #2 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/OP_APUT_WIDE.S */
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ bcc .LOP_APUT_WIDE_finish @ okay, continue below
+ b common_errArrayIndex @ index >= length, bail
+ @ May want to swap the order of these two branches depending on how the
+ @ branch prediction (if any) handles conditional forward branches vs.
+ @ unconditional forward branches.
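+ @ The 64-bit store itself (strd of vAA/vAA+1) happens at
+ @ .LOP_APUT_WIDE_finish, emitted out of line later in this file so the
+ @ handler fits in its 64-byte (.balign 64) slot.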
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/OP_APUT_OBJECT.S */
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(rINST, r2) @ rINST<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp rINST, #0 @ null array object?
+ GET_VREG(r9, r9) @ r9<- vAA
+ beq common_errNullObject @ yes, bail
+ ldr r3, [rINST, #offArrayObject_length] @ r3<- arrayObj->length
+ add r10, rINST, r1, lsl #2 @ r10<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcc .LOP_APUT_OBJECT_finish @ we're okay, continue on
+ b common_errArrayIndex @ index >= length, bail
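+ @ .LOP_APUT_OBJECT_finish, emitted out of line, performs the element type
+ @ check and the card-table write barrier before the reference is stored.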
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/OP_APUT_BOOLEAN.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/OP_APUT_BYTE.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #0 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strb r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/OP_APUT_CHAR.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/OP_APUT_SHORT.S */
+/* File: armv5te/OP_APUT.S */
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(r2, 1, 0) @ r2<- BB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ FETCH_B(r3, 1, 1) @ r3<- CC
+ GET_VREG(r0, r2) @ r0<- vBB (array object)
+ GET_VREG(r1, r3) @ r1<- vCC (requested index)
+ cmp r0, #0 @ null array object?
+ beq common_errNullObject @ yes, bail
+ ldr r3, [r0, #offArrayObject_length] @ r3<- arrayObj->length
+ add r0, r0, r1, lsl #1 @ r0<- arrayObj + index*width
+ cmp r1, r3 @ compare unsigned index, length
+ bcs common_errArrayIndex @ index >= length, bail
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r2, r9) @ r2<- vAA
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strh r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET: /* 0x52 */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_finish
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/OP_IGET_WIDE.S */
+ /*
+ * Wide 64-bit instance field get.
+ */
+ /* iget-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_WIDE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_WIDE_finish
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/OP_IGET_OBJECT.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_OBJECT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_OBJECT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/OP_IGET_BOOLEAN.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BOOLEAN_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/OP_IGET_BYTE.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_BYTE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_BYTE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/OP_IGET_CHAR.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_CHAR_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_CHAR_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/OP_IGET_SHORT.S */
+@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_SHORT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_SHORT_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT: /* 0x59 */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/OP_IPUT_WIDE.S */
+ /* iput-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_WIDE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_WIDE_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/OP_IPUT_OBJECT.S */
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_OBJECT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_OBJECT_finish @ yes, finish up
+ b common_exceptionThrown
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/OP_IPUT_BOOLEAN.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BOOLEAN_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BOOLEAN_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/OP_IPUT_BYTE.S */
+@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_BYTE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_BYTE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/OP_IPUT_CHAR.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_CHAR_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_CHAR_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/OP_IPUT_SHORT.S */
+@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" }
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_SHORT_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_SHORT_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET: /* 0x60 */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_resolve @ yes, do resolve
+.LOP_SGET_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ /* sget-wide vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_WIDE_resolve @ yes, do resolve
+.LOP_SGET_WIDE_finish:
+ mov r9, rINST, lsr #8 @ r9<- AA
+ .if 0
+ add r0, r0, #offStaticField_value @ r0<- pointer to data
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+ .endif
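+ @ The ".if 0" arm above is the volatile-field path (dvmQuasiAtomicRead64);
+ @ this expansion is the plain sget-wide, so the ldrd branch is assembled.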
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/OP_SGET_OBJECT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_OBJECT_resolve @ yes, do resolve
+.LOP_SGET_OBJECT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/OP_SGET_BOOLEAN.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve
+.LOP_SGET_BOOLEAN_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/OP_SGET_BYTE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_BYTE_resolve @ yes, do resolve
+.LOP_SGET_BYTE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/OP_SGET_CHAR.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_CHAR_resolve @ yes, do resolve
+.LOP_SGET_CHAR_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/OP_SGET_SHORT.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_SHORT_resolve @ yes, do resolve
+.LOP_SGET_SHORT_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT: /* 0x67 */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_resolve @ yes, do resolve
+.LOP_SPUT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ /* sput-wide vAA, field@BBBB */
+ ldr r0, [rSELF, #offThread_methodClassDex] @ r0<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r0, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r10, r1, lsl #2] @ r2<- resolved StaticField ptr
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r2, #0 @ is resolved entry null?
+ beq .LOP_SPUT_WIDE_resolve @ yes, do resolve
+.LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 0
+ add r2, r2, #offStaticField_value @ r2<- pointer to data
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/OP_SPUT_OBJECT.S */
+ /*
+ * 32-bit SPUT handler for objects
+ *
+ * for: sput-object, sput-object-volatile
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_OBJECT_resolve @ yes, do resolve
+.LOP_SPUT_OBJECT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ ldr r9, [r0, #offField_clazz] @ r9<- field->clazz
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ b .LOP_SPUT_OBJECT_end
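+ @ r2 (card table base) and r9 (field->clazz) are set up for the out-of-line
+ @ tail at .LOP_SPUT_OBJECT_end, which stores the reference and marks the
+ @ owning class's card for the GC write barrier.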
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/OP_SPUT_BOOLEAN.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve
+.LOP_SPUT_BOOLEAN_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/OP_SPUT_BYTE.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_BYTE_resolve @ yes, do resolve
+.LOP_SPUT_BYTE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/OP_SPUT_CHAR.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_CHAR_resolve @ yes, do resolve
+.LOP_SPUT_CHAR_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/OP_SPUT_SHORT.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_SHORT_resolve @ yes, do resolve
+.LOP_SPUT_SHORT_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
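+ @ The literal in ".if (!0)" is the range flag for this expansion: 0 here
+ @ (non-range form), so the low nibble D is extracted from GFED; the /range
+ @ variant expands with ".if (!1)" and leaves CCCC untouched.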
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r9, #0 @ null "this"?
+ ldr r10, [rSELF, #offThread_method] @ r10<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r10, [r10, #offMethod_clazz] @ r10<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_resolve @ do resolve now
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_finish:
+ cmp r9, #0 @ null "this" ref?
+ bne common_invokeMethodNoRange @ r0=method, r9="this"
+ b common_errNullObject @ yes, throw exception
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ mov r9, #0 @ null "this" (static call, no receiver)
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+#if defined(WITH_JIT)
+ add r10, r3, r1, lsl #2 @ r10<- &resolved_methodToCall
+#endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodNoRange @ yes, continue on
+ b .LOP_INVOKE_STATIC_resolve
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r2) @ r9<- first arg ("this")
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- methodClassDex
+ cmp r9, #0 @ null obj?
+ ldr r2, [rSELF, #offThread_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r9, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodNoRange @ (r0=method, r9="this")
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/OP_UNUSED_73.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL.S */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/OP_INVOKE_SUPER_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER.S */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod
+ cmp r9, #0 @ null "this"?
+ ldr r10, [rSELF, #offThread_method] @ r10<- current method
+ beq common_errNullObject @ null "this", throw exception
+ cmp r0, #0 @ already resolved?
+ ldr r10, [r10, #offMethod_clazz] @ r10<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on
+ b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */
+/* File: armv5te/OP_INVOKE_DIRECT.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoid loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now
+.LOP_INVOKE_DIRECT_RANGE_finish:
+ cmp r9, #0 @ null "this" ref?
+ bne common_invokeMethodRange @ r0=method, r9="this"
+ b common_errNullObject @ yes, throw exception
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/OP_INVOKE_STATIC_RANGE.S */
+/* File: armv5te/OP_INVOKE_STATIC.S */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- pDvmDex
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods
+ mov r9, #0 @ null "this" (static call, no receiver)
+ ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall
+#if defined(WITH_JIT)
+ add r10, r3, r1, lsl #2 @ r10<- &resolved_methodToCall
+#endif
+ cmp r0, #0 @ already resolved?
+ EXPORT_PC() @ must export for invoke
+ bne common_invokeMethodRange @ yes, continue on
+ b .LOP_INVOKE_STATIC_RANGE_resolve
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: armv5te/OP_INVOKE_INTERFACE.S */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r2, 2) @ r2<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r2, r2, #15 @ r2<- C (or stays CCCC)
+ .endif
+ EXPORT_PC() @ must export for invoke
+ GET_VREG(r9, r2) @ r9<- first arg ("this")
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- methodClassDex
+ cmp r9, #0 @ null obj?
+ ldr r2, [rSELF, #offThread_method] @ r2<- method
+ beq common_errNullObject @ yes, fail
+ ldr r0, [r9, #offObject_clazz] @ r0<- thisPtr->clazz
+ bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex)
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yes, handle exception
+ b common_invokeMethodRange @ (r0=method, r9="this")
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/OP_UNUSED_79.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/OP_UNUSED_7A.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/OP_NEG_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsb r0, r0, #0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/OP_NOT_INT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/OP_NEG_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ rsbs r0, r0, #0 @ optional op; may set condition codes
+ rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/OP_NOT_LONG.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mvn r0, r0 @ optional op; may set condition codes
+ mvn r1, r1 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/OP_NEG_FLOAT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/OP_NEG_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/OP_INT_TO_LONG.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ mov r1, r0, asr #31 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/OP_INT_TO_FLOAT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_i2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/OP_INT_TO_DOUBLE.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_i2d @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/OP_LONG_TO_INT.S */
+/* we ignore the high word, making this equivalent to a 32-bit reg move */
+/* File: armv5te/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ mov r1, rINST, lsr #12 @ r1<- B from 15:12
+ mov r0, rINST, lsr #8 @ r0<- A from 11:8
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ GET_VREG(r2, r1) @ r2<- fp[B]
+ and r0, r0, #15
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ SET_VREG(r2, r0) @ fp[A]<- r2
+ GOTO_OPCODE(ip) @ execute next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/OP_LONG_TO_FLOAT.S */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/OP_LONG_TO_DOUBLE.S */
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r3, {r0-r1} @ r0/r1<- vAA
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/OP_FLOAT_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_f2iz @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+#if 0
+@include "armv5te/unop.S" {"instr":"bl f2i_doconv"}
+@break
+/*
+ * Convert the float in r0 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+f2i_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x4f000000 @ (float)maxint
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxint?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0x80000000 @ return maxint (7fffffff)
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xcf000000 @ (float)minint
+ bl __aeabi_fcmple @ is arg <= minint?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0x80000000 @ return minint (80000000)
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ ldmeqfd sp!, {r4, pc} @ return zero for NaN
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2iz @ convert float to int
+ ldmfd sp!, {r4, pc}
+#endif
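+
+/*
+ * Illustrative sketch (not part of the generated code): the clipping rules
+ * that f2i_doconv encodes, written out in C. The live handler above assumes
+ * the toolchain's __aeabi_f2iz already behaves this way; the helper name
+ * below is hypothetical and only restates the semantics.
+ *
+ *     #include <stdint.h>
+ *     static int32_t java_f2i(float in) {
+ *         if (in >= 2147483648.0f)  return INT32_MAX;  // clip large values
+ *         if (in <= -2147483648.0f) return INT32_MIN;  // clip small values
+ *         if (in != in)             return 0;          // NaN converts to 0
+ *         return (int32_t)in;                          // ordinary truncation
+ *     }
+ */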
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/OP_FLOAT_TO_LONG.S */
+@include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"}
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl f2l_doconv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/OP_FLOAT_TO_DOUBLE.S */
+/* File: armv5te/unopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0", where
+ * "result" is a 64-bit quantity in r0/r1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r0, r3) @ r0<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ bl __aeabi_f2d @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/OP_DOUBLE_TO_INT.S */
+/* EABI appears to have Java-style conversions of +inf/-inf/NaN */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_d2iz @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+#if 0
+@include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"}
+@break
+/*
+ * Convert the double in r0/r1 to an int in r0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ */
+d2i_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ mov r2, #0x80000000 @ maxint, as a double (low word)
+ mov r2, r2, asr #9 @ 0xffc00000
+ sub sp, sp, #4 @ align for EABI
+ mvn r3, #0xbe000000 @ maxint, as a double (high word)
+ sub r3, r3, #0x00200000 @ 0x41dfffff
+ mov r4, r0 @ save a copy of r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxint?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0x80000000 @ return maxint (0x7fffffff)
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r3, #0xc1000000 @ minint, as a double (high word)
+ add r3, r3, #0x00e00000 @ 0xc1e00000
+ mov r2, #0 @ minint, as a double (low word)
+ bl __aeabi_dcmple @ is arg <= minint?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0x80000000 @ return minint (80000000)
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ beq 1f @ return zero for NaN
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2iz @ convert double to int
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+#endif
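+
+/*
+ * Illustrative note (not used by the build): the immediates above assemble
+ * the IEEE-754 bit patterns of the clip limits as doubles:
+ *     (double)0x7fffffff == 2147483647.0   -> high 0x41dfffff, low 0xffc00000
+ *     (double)INT32_MIN  == -2147483648.0  -> high 0xc1e00000, low 0x00000000
+ * A small C check of the same encodings (hypothetical helper, assumes
+ * <stdint.h> and <string.h>):
+ *
+ *     static uint64_t double_bits(double d) {
+ *         uint64_t u; memcpy(&u, &d, sizeof u); return u;
+ *     }
+ *     // double_bits(2147483647.0)  == 0x41dfffffffc00000
+ *     // double_bits(-2147483648.0) == 0xc1e0000000000000
+ */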
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/OP_DOUBLE_TO_LONG.S */
+@include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"}
+/* File: armv5te/unopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0/r1".
+ * This could be an ARM instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+    ldmia   r3, {r0-r1}                 @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl d2l_doconv @ r0/r1<- op, r2-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+    stmia   r9, {r0-r1}                 @ vA/vA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/OP_DOUBLE_TO_FLOAT.S */
+/* File: armv5te/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op r0/r1", where
+ * "result" is a 32-bit quantity in r0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ *
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[B]
+ and r9, r9, #15
+ ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_d2f @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv6/OP_INT_TO_BYTE.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ sxtb r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv6/OP_INT_TO_CHAR.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ uxth r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv6/OP_INT_TO_SHORT.S */
+/* File: armv5te/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op r0".
+ * This could be an ARM instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ mov r3, rINST, lsr #12 @ r3<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r3) @ r0<- vB
+ and r9, r9, #15
+ @ optional op; may set condition codes
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ sxth r0, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/OP_ADD_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
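+/*
+ * Illustrative sketch (not part of the generated code): how the binop
+ * vAA, vBB, vCC format used above decodes. rINST holds the first 16-bit code
+ * unit (opcode | AA<<8) and FETCH(r0, 1) reads the second unit (BB | CC<<8).
+ * Names such as "code", "pc" and "fp" below are hypothetical stand-ins for
+ * the instruction stream and the register frame.
+ *
+ *     uint16_t inst = code[pc];        // opcode in bits 7..0, AA in 15..8
+ *     uint16_t ccbb = code[pc + 1];    // second code unit
+ *     uint32_t AA = inst >> 8;         // destination register
+ *     uint32_t BB = ccbb & 0xff;       // first source register
+ *     uint32_t CC = ccbb >> 8;         // second source register
+ *     fp[AA] = fp[BB] + fp[CC];        // add-int
+ */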
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/OP_SUB_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/OP_MUL_INT.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/OP_DIV_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT: /* 0x94 */
+/* File: armv5te/OP_REM_INT.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
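+/*
+ * Illustrative note: __aeabi_idivmod returns the quotient in r0 and the
+ * remainder in r1, so rem-int stores r1. In C terms (a sketch with
+ * hypothetical names; the INT_MIN % -1 edge case is left to the EABI helper,
+ * per the template comment above):
+ *
+ *     if (fp[CC] == 0)
+ *         throw_divide_by_zero();          // common_errDivideByZero above
+ *     fp[AA] = fp[BB] % fp[CC];            // sign follows the dividend vBB
+ */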
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT: /* 0x95 */
+/* File: armv5te/OP_AND_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT: /* 0x96 */
+/* File: armv5te/OP_OR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/OP_XOR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/OP_SHL_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/OP_SHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/OP_USHR_INT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/OP_ADD_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/OP_SUB_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/OP_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ *
+ * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
+ * WX
+ * x YZ
+ * --------
+ * ZW ZX
+ * YW YX
+ *
+ * The low word of the result holds ZX, the high word holds
+ * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because
+ * it doesn't fit in the low 64 bits.
+ *
+ * Unlike most ARM math operations, multiply instructions have
+ * restrictions on using the same register more than once (Rd and Rm
+ * cannot be the same).
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST, lsr #8 @ r0<- AA
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ add r0, rFP, r0, lsl #2 @ r0<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_MUL_LONG_finish
+
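+/*
+ * Illustrative sketch (not part of the generated code): the partial-product
+ * scheme described above, in C. Here a/b stand for vBB/vCC read as 64-bit
+ * values, W/X are the high/low words of a (r1/r0) and Y/Z the high/low words
+ * of b (r3/r2); the low 64 bits of the product are the same for signed and
+ * unsigned operands.
+ *
+ *     uint32_t X = (uint32_t)a, W = (uint32_t)(a >> 32);
+ *     uint32_t Z = (uint32_t)b, Y = (uint32_t)(b >> 32);
+ *     uint64_t zx = (uint64_t)Z * X;                       // umull r9,r10,r2,r0
+ *     uint32_t hi = Z * W + Y * X + (uint32_t)(zx >> 32);  // mul, mla, add
+ *     uint32_t lo = (uint32_t)zx;
+ *     // ((uint64_t)hi << 32) | lo == (uint64_t)(a * b)
+ */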
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/OP_DIV_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/OP_REM_LONG.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/OP_AND_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/OP_OR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/OP_XOR_LONG.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/OP_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHL_LONG_finish
+
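+/*
+ * Illustrative sketch (not part of the generated code): the two-word shift
+ * above in C. lo/hi are vBB/vBB+1 (r0/r1) and s is the shift count already
+ * masked to 0..63; the s >= 32 case corresponds to the movpl path, and the
+ * low word is written in .LOP_SHL_LONG_finish.
+ *
+ *     uint32_t newlo, newhi;
+ *     if (s == 0)      { newlo = lo;       newhi = hi; }
+ *     else if (s < 32) { newlo = lo << s;  newhi = (hi << s) | (lo >> (32 - s)); }
+ *     else             { newlo = 0;        newhi = lo << (s - 32); }
+ */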
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/OP_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_SHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/OP_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r3, r0, #255 @ r3<- BB
+ mov r0, r0, lsr #8 @ r0<- CC
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[BB]
+ GET_VREG(r2, r0) @ r2<- vCC
+ ldmia r3, {r0-r1} @ r0/r1<- vBB/vBB+1
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+
+    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ b .LOP_USHR_LONG_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/OP_ADD_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fadd @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/OP_SUB_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fsub @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/OP_MUL_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fmul @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/OP_DIV_FLOAT.S */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl __aeabi_fdiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/OP_REM_FLOAT.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5te/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
+ * mul-float, div-float, rem-float
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ mov r3, r0, lsr #8 @ r3<- CC
+ and r2, r0, #255 @ r2<- BB
+ GET_VREG(r1, r3) @ r1<- vCC
+ GET_VREG(r0, r2) @ r0<- vBB
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 11-14 instructions */
+
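+/*
+ * Illustrative note: Java's floating-point remainder is defined in terms of
+ * truncating division, which is the rule C's fmodf() follows (the result
+ * takes the sign of the dividend, and the NaN/infinity cases fall out of
+ * IEEE arithmetic), so the handler can simply call into libm. Minimal sketch:
+ *
+ *     #include <math.h>
+ *     static float rem_float(float vBB, float vCC) { return fmodf(vBB, vCC); }
+ */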
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/OP_ADD_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dadd @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/OP_SUB_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dsub @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/OP_MUL_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dmul @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/OP_DIV_DOUBLE.S */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ddiv @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/OP_REM_DOUBLE.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(r0, 1) @ r0<- CCBB
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[BB]
+ add r3, rFP, r3, lsl #2 @ r3<- &fp[CC]
+ ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
+ ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 14-17 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/OP_ADD_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
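+/*
+ * About the "A+" shorthand above: in this vA,vB instruction format the A
+ * nibble sits in bits 8-11 of the instruction word with B in bits 12-15, so
+ * "rINST, lsr #8" leaves A in the low four bits with B still stacked on top
+ * ("A+"); the following "and r9, r9, #15" masks B away, leaving just A.
+ */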
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/OP_SUB_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ sub r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/OP_MUL_INT_2ADDR.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/OP_DIV_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
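+/*
+ * Integer division goes through the EABI helper: __aeabi_idiv takes the
+ * dividend in r0 and the divisor in r1 and returns the quotient in r0.
+ * The explicit "cmp r1, #0 / beq" above is what provides the Java
+ * divide-by-zero behaviour -- instead of relying on the helper, control is
+ * handed to common_errDivideByZero to raise the error.
+ */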
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/OP_REM_INT_2ADDR.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/OP_AND_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/OP_OR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/OP_XOR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/OP_SHL_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/OP_SHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/OP_USHR_INT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/OP_ADD_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ adds r0, r0, r2 @ optional op; may set condition codes
+ adc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/OP_SUB_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ subs r0, r0, r2 @ optional op; may set condition codes
+ sbc r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
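+/*
+ * The two handlers above are the usual carry-chain idiom for 64-bit
+ * arithmetic on a 32-bit core: "adds" adds the low words and latches the
+ * carry, and "adc" folds it into the high-word add (e.g. 0x00000000ffffffff
+ * + 1 carries out of the low word, so "adc" bumps the high word to 1).
+ * sub-long/2addr mirrors this with "subs"/"sbc" propagating the borrow.
+ */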
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/OP_MUL_LONG_2ADDR.S */
+ /*
+ * Signed 64-bit integer multiply, "/2addr" version.
+ *
+ * See OP_MUL_LONG for an explanation.
+ *
+ * We get a little tight on registers, so to avoid looking up &fp[A]
+ * again we stuff it into rINST.
+ */
+ /* mul-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add rINST, rFP, r9, lsl #2 @ rINST<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia rINST, {r0-r1} @ r0/r1<- vAA/vAA+1
+ mul ip, r2, r1 @ ip<- ZxW
+ umull r9, r10, r2, r0 @ r9/r10 <- ZxX
+ mla r2, r0, r3, ip @ r2<- YxX + (ZxW)
+ mov r0, rINST @ r0<- &fp[A] (free up rINST)
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX))
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
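+/*
+ * Worked breakdown of the multiply above (the same scheme OP_MUL_LONG
+ * documents): write vA as W:X (r1:r0, high:low) and vB as Y:Z (r3:r2).
+ * Then, modulo 2^64,
+ *     vA * vB = ((W*Z + X*Y) << 32) + X*Z
+ * so the low result word is low(X*Z) and the high word is
+ * high(X*Z) + low(W*Z + X*Y); anything above bit 63 simply falls away.
+ * In the code, umull forms X*Z in r9/r10, mul and mla accumulate
+ * W*Z + X*Y in r2, and the final add folds that into the high half.
+ */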
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/OP_DIV_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
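+/*
+ * The zero test above uses a small trick: "orrs ip, r2, r3" ORs the two
+ * halves of the divisor together and sets the flags, and the OR is zero
+ * only when the full 64-bit value is zero, so one instruction covers the
+ * whole check. __aeabi_ldivmod then returns the quotient in r0/r1 (the
+ * rem-long/2addr handler below relies on the remainder half of that
+ * contract, r2/r3).
+ */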
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/OP_REM_LONG_2ADDR.S */
+/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 1
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ldivmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/OP_AND_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ and r0, r0, r2 @ optional op; may set condition codes
+ and r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/OP_OR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ orr r0, r0, r2 @ optional op; may set condition codes
+ orr r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/OP_XOR_LONG_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ eor r0, r0, r2 @ optional op; may set condition codes
+ eor r1, r1, r3 @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/OP_SHL_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r1, r1, asl r2 @ r1<- r1 << r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r1, r1, r0, lsr r3 @ r1<- r1 | (r0 >> (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r1, r0, asl ip @ if r2 >= 32, r1<- r0 << (r2-32)
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ b .LOP_SHL_LONG_2ADDR_finish
+
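+/*
+ * How shifts of 32 or more fall out above: "subs ip, r2, #32" goes negative
+ * for shifts under 32, so "movpl" is skipped and the high word keeps the
+ * (r1 << r2) | (r0 >> (32-r2)) combination. For a shift of, say, 40,
+ * "movpl" replaces the high word with r0 << 8, and the final
+ * "mov r0, r0, asl r2" yields 0, since a register-specified shift of 32 or
+ * more clears the register -- exactly the low word a 64-bit shift by 40
+ * should produce.
+ */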
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/OP_SHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, asr ip @ if r2 >= 32, r0<-r1 >> (r2-32)
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ b .LOP_SHR_LONG_2ADDR_finish
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/OP_USHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r2, r3) @ r2<- vB
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ and r2, r2, #63 @ r2<- r2 & 0x3f
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+
+ mov r0, r0, lsr r2 @ r0<- r0 >>> r2
+ rsb r3, r2, #32 @ r3<- 32 - r2
+ orr r0, r0, r1, asl r3 @ r0<- r0 | (r1 << (32-r2))
+ subs ip, r2, #32 @ ip<- r2 - 32
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+ movpl r0, r1, lsr ip @ if r2 >= 32, r0<-r1 >>> (r2-32)
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ b .LOP_USHR_LONG_2ADDR_finish
+
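+/*
+ * shr-long/2addr and ushr-long/2addr above mirror the left shift: the low
+ * word collects the bits that fall out of the high word, and for shifts of
+ * 32 or more "movpl" swaps in the high word shifted by (n-32) -- asr for
+ * shr so the sign extends, lsr for ushr so zeros fill in. The closing
+ * "asr r2" / "lsr r2" on the high word also behaves for large shifts:
+ * a register-specified asr by 32 or more replicates the sign bit, while
+ * lsr clears the word.
+ */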
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fadd @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fsub @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fmul @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_fdiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
+/* EABI doesn't define a float remainder function, but libm does */
+/* File: armv5te/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
+ * sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r3, rINST, lsr #12 @ r3<- B
+ and r9, r9, #15
+ GET_VREG(r1, r3) @ r1<- vB
+ GET_VREG(r0, r9) @ r0<- vA
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmodf @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dadd @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dsub @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_dmul @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_ddiv @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
+/* EABI doesn't define a double remainder function, but libm does */
+/* File: armv5te/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0-r1 op r2-r3".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vB (r2-r3). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
+ * sub-double/2addr, mul-double/2addr, div-double/2addr,
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ mov r9, rINST, lsr #8 @ r9<- A+
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r9, r9, #15
+ add r1, rFP, r1, lsl #2 @ r1<- &fp[B]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[A]
+ ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ .if 0
+ orrs ip, r2, r3 @ second arg (r2-r3) is zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl fmod @ result<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 12-15 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/OP_ADD_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/OP_RSUB_INT.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
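+/*
+ * rsub-int is the "reverse subtract" flavor: the result is CCCC - vB rather
+ * than vB - CCCC, which is why it maps onto a single ARM "rsb" above --
+ * r0 holds vB, r1 holds the sign-extended literal, and "rsb r0, r0, r1"
+ * computes r1 - r0.
+ */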
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/OP_MUL_INT_LIT16.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/OP_DIV_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/OP_REM_INT_LIT16.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 1
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/OP_AND_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/OP_OR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/OP_XOR_INT_LIT16.S */
+/* File: armv5te/binopLit16.S */
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ /* binop/lit16 vA, vB, #+CCCC */
+ FETCH_S(r1, 1) @ r1<- ssssCCCC (sign-extended)
+ mov r2, rINST, lsr #12 @ r2<- B
+ mov r9, rINST, lsr #8 @ r9<- A+
+ GET_VREG(r0, r2) @ r0<- vB
+ and r9, r9, #15
+ .if 0
+ cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/OP_ADD_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ add r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/OP_RSUB_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ rsb r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/OP_MUL_INT_LIT8.S */
+/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ mul r0, r1, r0 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/OP_DIV_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idiv @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/OP_REM_INT_LIT8.S */
+/* idivmod returns quotient in r0 and remainder in r1 */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 1
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ bl __aeabi_idivmod @ r1<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/OP_AND_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ and r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/OP_OR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ orr r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/OP_XOR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ @ optional op; may set condition codes
+ eor r0, r0, r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/OP_SHL_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asl r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/OP_SHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, asr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/OP_USHR_INT_LIT8.S */
+/* File: armv5te/binopLit8.S */
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = r0 op r1".
+ * This could be an ARM instruction or a function call. (If the result
+ * comes back in a register other than r0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (r1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ /* binop/lit8 vAA, vBB, #+CC */
+ FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC)
+ mov r9, rINST, lsr #8 @ r9<- AA
+ and r2, r3, #255 @ r2<- BB
+ GET_VREG(r0, r2) @ r0<- vBB
+ movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended)
+ .if 0
+ @cmp r1, #0 @ is second operand zero?
+ beq common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+
+ and r1, r1, #31 @ optional op; may set condition codes
+ mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+ /* 10-12 instructions */
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/OP_IGET_VOLATILE.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_VOLATILE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/OP_IPUT_VOLATILE.S */
+/* File: armv5te/OP_IPUT.S */
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_VOLATILE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/OP_SGET_VOLATILE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_VOLATILE_resolve @ yes, do resolve
+.LOP_SGET_VOLATILE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
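+ /*
+ * Roughly, once the StaticField for BBBB is resolved, the fast path above is
+ * (pseudo-C, names illustrative):
+ *
+ *     vAA = field->value;    @ 32-bit slot, followed by a DMB acquire barrier
+ */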
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/OP_SPUT_VOLATILE.S */
+/* File: armv5te/OP_SPUT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_VOLATILE_resolve @ yes, do resolve
+.LOP_SPUT_VOLATILE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ SMP_DMB
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/OP_IGET_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_IGET.S */
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_OBJECT_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_OBJECT_VOLATILE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */
+/* File: armv5te/OP_IGET_WIDE.S */
+ /*
+ * Wide 64-bit instance field get.
+ */
+ /* iget-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IGET_WIDE_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0
+ bne .LOP_IGET_WIDE_VOLATILE_finish
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */
+/* File: armv5te/OP_IPUT_WIDE.S */
+ /* iput-wide vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_WIDE_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_WIDE_VOLATILE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */
+/* File: armv5te/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ /* sget-wide vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve
+.LOP_SGET_WIDE_VOLATILE_finish:
+ mov r9, rINST, lsr #8 @ r9<- AA
+ .if 1
+ add r0, r0, #offStaticField_value @ r0<- pointer to data
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
+ .endif
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
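+ /*
+ * For a volatile 64-bit static field the two words must be read as a single
+ * atomic unit, hence the dvmQuasiAtomicRead64 call above instead of a plain
+ * ldrd. Roughly (pseudo-C, names illustrative):
+ *
+ *     int64_t val = dvmQuasiAtomicRead64(&field->value);
+ *     vAA/vAA+1 = val;
+ */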
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */
+/* File: armv5te/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ /* sput-wide vAA, field@BBBB */
+ ldr r0, [rSELF, #offThread_methodClassDex] @ r0<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r0, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ mov r9, rINST, lsr #8 @ r9<- AA
+ ldr r2, [r10, r1, lsl #2] @ r2<- resolved StaticField ptr
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ cmp r2, #0 @ is resolved entry null?
+ beq .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 1
+ add r2, r2, #offStaticField_value @ r2<- pointer to data
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/OP_BREAKPOINT.S */
+ /*
+ * Breakpoint handler.
+ *
+ * Restart this instruction with the original opcode. By
+ * the time we get here, the breakpoint will have already been
+ * handled.
+ */
+ mov r0, rPC
+ bl dvmGetOriginalOpcode @ (rPC)
+ FETCH(rINST, 0) @ reload OP_BREAKPOINT + rest of inst
+ ldr r1, [rSELF, #offThread_mainHandlerTable]
+ and rINST, #0xff00
+ orr rINST, rINST, r0
+ GOTO_OPCODE_BASE(r1, r0)
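+ /*
+ * In other words, roughly (pseudo-C):
+ *
+ *     opcode = dvmGetOriginalOpcode(rPC);
+ *     rINST  = (rINST & 0xff00) | opcode;
+ *     goto mainHandlerTable[opcode];
+ */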
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
+ /*
+ * Handle a throw-verification-error instruction. This throws an
+ * exception for an error discovered during verification. The
+ * exception is indicated by AA, with some detail provided by BBBB.
+ */
+ /* op AA, ref@BBBB */
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ FETCH(r2, 1) @ r2<- BBBB
+ EXPORT_PC() @ export the PC
+ mov r1, rINST, lsr #8 @ r1<- AA
+ bl dvmThrowVerificationError @ always throws
+ b common_exceptionThrown @ handle exception
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/OP_EXECUTE_INLINE.S */
+ /*
+ * Execute a "native inline" instruction.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in r0-r3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ *
+ * TUNING: could maintain two tables, pointer in Thread and
+ * swap if profiler/debugger active.
+ */
+ /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+ ldrh r2, [rSELF, #offThread_subMode]
+ FETCH(r10, 1) @ r10<- BBBB
+ EXPORT_PC() @ can throw
+ ands r2, #kSubModeDebugProfile @ Any going on?
+ bne .LOP_EXECUTE_INLINE_debugmode @ yes - take slow path
+.LOP_EXECUTE_INLINE_resume:
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #12 @ r0<- B
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ add sp, sp, #8 @ pop stack
+ cmp r0, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
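+ /*
+ * Sketch of the resume path above: the four args arrive in r0-r3 and
+ * &self->retval is pushed as the result pointer, so in pseudo-C (names
+ * illustrative):
+ *
+ *     bool ok = (*func)(arg0, arg1, arg2, arg3, &self->retval);
+ *     if (!ok) goto common_exceptionThrown;
+ */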
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
+ /*
+ * Execute a "native inline" instruction, using "/range" semantics.
+ * Same idea as execute-inline, but we get the args differently.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in r0-r3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ */
+ /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
+ ldrh r2, [rSELF, #offThread_subMode]
+ FETCH(r10, 1) @ r10<- BBBB
+ EXPORT_PC() @ can throw
+ ands r2, #kSubModeDebugProfile @ Any going on?
+ bne .LOP_EXECUTE_INLINE_RANGE_debugmode @ yes - take slow path
+.LOP_EXECUTE_INLINE_RANGE_resume:
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #8 @ r0<- AA
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ add sp, sp, #8 @ pop stack
+ cmp r0, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/OP_INVOKE_OBJECT_INIT_RANGE.S */
+ /*
+ * Invoke Object.<init> on an object. In practice we know that
+ * Object's nullary constructor doesn't do anything, so we just
+ * skip it unless a debugger is active.
+ */
+ FETCH(r1, 2) @ r1<- CCCC
+ GET_VREG(r0, r1) @ r0<- "this" ptr
+ cmp r0, #0 @ check for NULL
+ beq common_errNullObject @ export PC and throw NPE
+ ldr r1, [r0, #offObject_clazz] @ r1<- obj->clazz
+ ldr r2, [r1, #offClassObject_accessFlags] @ r2<- clazz->accessFlags
+ tst r2, #CLASS_ISFINALIZABLE @ is this class finalizable?
+ bne .LOP_INVOKE_OBJECT_INIT_RANGE_setFinal @ yes, go
+.LOP_INVOKE_OBJECT_INIT_RANGE_finish:
+ ldrh r1, [rSELF, #offThread_subMode]
+ ands r1, #kSubModeDebuggerActive @ debugger active?
+ bne .LOP_INVOKE_OBJECT_INIT_RANGE_debugger @ Yes - skip optimization
+ FETCH_ADVANCE_INST(2+1) @ advance to next instr, load rINST
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ GOTO_OPCODE(ip) @ execute it
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB_ST
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
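+ /*
+ * Roughly (CCCC here is a precomputed byte offset, not a field index;
+ * pseudo-C, names illustrative):
+ *
+ *     Object* obj = (Object*) vB;
+ *     if (obj == NULL) goto common_errNullObject;
+ *     vA = *(u4*) ((char*) obj + CCCC);
+ */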
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/OP_IGET_WIDE_QUICK.S */
+ /* iget-wide-quick vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(ip, 1) @ ip<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldrd r0, [r3, ip] @ r0<- obj.field (64 bits, aligned)
+ and r2, r2, #15
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
+/* File: armv5te/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- object we're operating on
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ ldr r0, [r3, r1] @ r0<- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/OP_IPUT_QUICK.S */
+ /* For: iput-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/OP_IPUT_WIDE_QUICK.S */
+ /* iput-wide-quick vA, vB, offset@CCCC */
+ mov r0, rINST, lsr #8 @ r0<- A(+)
+ mov r1, rINST, lsr #12 @ r1<- B
+ and r0, r0, #15
+ GET_VREG(r2, r1) @ r2<- fp[B], the object pointer
+ add r3, rFP, r0, lsl #2 @ r3<- &fp[A]
+ cmp r2, #0 @ check object for null
+ ldmia r3, {r0-r1} @ r0/r1<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH(r3, 1) @ r3<- field byte offset
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/OP_IPUT_OBJECT_QUICK.S */
+ /* For: iput-object-quick */
+ /* op vA, vB, offset@CCCC */
+ mov r2, rINST, lsr #12 @ r2<- B
+ GET_VREG(r3, r2) @ r3<- fp[B], the object pointer
+ FETCH(r1, 1) @ r1<- field byte offset
+ cmp r3, #0 @ check object for null
+ mov r2, rINST, lsr #8 @ r2<- A(+)
+ beq common_errNullObject @ object was null
+ and r2, r2, #15
+ GET_VREG(r0, r2) @ r0<- fp[A]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ str r0, [r3, r1] @ obj.field (always 32 bits)<- r0
+ cmp r0, #0
+ strneb r2, [r2, r3, lsr #GC_CARD_SHIFT] @ mark card based on obj head
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!0)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r9, r3) @ r9<- vC ("this" ptr)
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r9, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodNoRange @ (r0=method, r9="this")
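+ /*
+ * Roughly (BBBB is a precomputed vtable index; pseudo-C, names illustrative):
+ *
+ *     Object* thisObj = (Object*) vC;
+ *     if (thisObj == NULL) goto common_errNullObject;
+ *     Method* meth = thisObj->clazz->vtable[BBBB];
+ *     @ hand off to common_invokeMethodNoRange with r0=meth, r9=thisObj
+ */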
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r3, 2) @ r3<- FEDC or CCCC
+ FETCH(r1, 1) @ r1<- BBBB
+ .if (!1)
+ and r3, r3, #15 @ r3<- C (or stays CCCC)
+ .endif
+ GET_VREG(r9, r3) @ r9<- vC ("this" ptr)
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r2, [r9, #offObject_clazz] @ r2<- thisPtr->clazz
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- thisPtr->clazz->vtable
+ EXPORT_PC() @ invoke must export
+ ldr r0, [r2, r1, lsl #2] @ r0<- vtable[BBBB]
+ bl common_invokeMethodRange @ (r0=method, r9="this")
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ .if (!0)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r9, r10) @ r9<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r9, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodNoRange @ (r0=method, r9="this")
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(r10, 2) @ r10<- GFED or CCCC
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ .if (!1)
+ and r10, r10, #15 @ r10<- D (or stays CCCC)
+ .endif
+ FETCH(r1, 1) @ r1<- BBBB
+ ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz
+ EXPORT_PC() @ must export for invoke
+ ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super
+ GET_VREG(r9, r10) @ r9<- "this"
+ ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable
+ cmp r9, #0 @ null "this" ref?
+ ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB]
+ beq common_errNullObject @ "this" is null, throw exception
+ bl common_invokeMethodRange @ (r0=method, r9="this")
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/OP_IPUT_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_IPUT_OBJECT.S */
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ /* op vA, vB, field@CCCC */
+ mov r0, rINST, lsr #12 @ r0<- B
+ ldr r3, [rSELF, #offThread_methodClassDex] @ r3<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref CCCC
+ ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
+ GET_VREG(r9, r0) @ r9<- fp[B], the object pointer
+ ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr
+ cmp r0, #0 @ is resolved entry null?
+ bne .LOP_IPUT_OBJECT_VOLATILE_finish @ no, already resolved
+8: ldr r2, [rSELF, #offThread_method] @ r2<- current method
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveInstField @ r0<- resolved InstField ptr
+ cmp r0, #0 @ success?
+ bne .LOP_IPUT_OBJECT_VOLATILE_finish @ yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/OP_SGET_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SGET_OBJECT_VOLATILE_resolve @ yes, do resolve
+.LOP_SGET_OBJECT_VOLATILE_finish: @ field ptr in r0
+ ldr r1, [r0, #offStaticField_value] @ r1<- field value
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r2) @ fp[AA]<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/OP_SPUT_OBJECT_VOLATILE.S */
+/* File: armv5te/OP_SPUT_OBJECT.S */
+ /*
+ * 32-bit SPUT handler for objects
+ *
+ * for: sput-object, sput-object-volatile
+ */
+ /* op vAA, field@BBBB */
+ ldr r2, [rSELF, #offThread_methodClassDex] @ r2<- DvmDex
+ FETCH(r1, 1) @ r1<- field ref BBBB
+ ldr r10, [r2, #offDvmDex_pResFields] @ r10<- dvmDex->pResFields
+ ldr r0, [r10, r1, lsl #2] @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ is resolved entry null?
+ beq .LOP_SPUT_OBJECT_VOLATILE_resolve @ yes, do resolve
+.LOP_SPUT_OBJECT_VOLATILE_finish: @ field ptr in r0
+ mov r2, rINST, lsr #8 @ r2<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_VREG(r1, r2) @ r1<- fp[AA]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ ldr r9, [r0, #offField_clazz] @ r9<- field->clazz
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ b .LOP_SPUT_OBJECT_VOLATILE_end
+
+
+/* ------------------------------ */
+ .balign 64
+.L_OP_UNUSED_FF: /* 0xff */
+/* File: armv5te/OP_UNUSED_FF.S */
+/* File: armv5te/unused.S */
+ bl common_abort
+
+
+ .balign 64
+ .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
+ .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global dvmAsmSisterStart
+ .type dvmAsmSisterStart, %function
+ .text
+ .balign 4
+dvmAsmSisterStart:
+
+/* continuation for OP_CONST_STRING */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_resolve:
+ EXPORT_PC()
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CONST_STRING_JUMBO */
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * r1: BBBBBBBB (String ref)
+ * r9: target register
+ */
+.LOP_CONST_STRING_JUMBO_resolve:
+ EXPORT_PC()
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveString @ r0<- String reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CONST_CLASS */
+
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * r1: BBBB (Class ref)
+ * r9: target register
+ */
+.LOP_CONST_CLASS_resolve:
+ EXPORT_PC()
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- Class reference
+ cmp r0, #0 @ failed?
+ beq common_exceptionThrown @ yup, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r9) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_CHECK_CAST */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds desired class resolved from BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_fullcheck:
+ mov r10, r1 @ avoid ClassObject getting clobbered
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ cmp r0, #0 @ failed?
+ bne .LOP_CHECK_CAST_okay @ no, success
+
+ @ A cast has failed. We need to throw a ClassCastException.
+ EXPORT_PC() @ about to throw
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz (actual class)
+ mov r1, r10 @ r1<- desired class
+ bl dvmThrowClassCastException
+ b common_exceptionThrown
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r2 holds BBBB
+ * r9 holds object
+ */
+.LOP_CHECK_CAST_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r1, r2 @ r1<- BBBB
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_CHECK_CAST_resolved @ pick up where we left off
+
+/* continuation for OP_INSTANCE_OF */
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * r0 holds obj->clazz
+ * r1 holds class resolved from BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_fullcheck:
+ bl dvmInstanceofNonTrivial @ r0<- boolean result
+ @ fall through to OP_INSTANCE_OF_store
+
+ /*
+ * r0 holds boolean result
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_store:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Trivial test succeeded, save and bail.
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_trivial:
+ mov r0, #1 @ indicate success
+ @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r0, r9) @ vA<- r0
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r3 holds BBBB
+ * r9 holds A
+ */
+.LOP_INSTANCE_OF_resolve:
+ EXPORT_PC() @ resolve() could throw
+ ldr r0, [rSELF, #offThread_method] @ r0<- self->method
+ mov r1, r3 @ r1<- BBBB
+ mov r2, #1 @ r2<- true
+ ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ beq common_exceptionThrown @ yes, handle exception
+ mov r1, r0 @ r1<- class resolved from BBBB
+ mov r3, rINST, lsr #12 @ r3<- B
+ GET_VREG(r0, r3) @ r0<- vB (object)
+ ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz
+ b .LOP_INSTANCE_OF_resolved @ pick up where we left off
+
+/* continuation for OP_NEW_INSTANCE */
+
+ .balign 32 @ minimize cache lines
+.LOP_NEW_INSTANCE_finish: @ r0=new object
+ mov r3, rINST, lsr #8 @ r3<- AA
+ cmp r0, #0 @ failed?
+#if defined(WITH_JIT)
+ /*
+ * The JIT needs the class to be fully resolved before it can
+ * include this instruction in a trace.
+ */
+ ldrh r1, [rSELF, #offThread_subMode]
+ beq common_exceptionThrown @ yes, handle the exception
+ ands r1, #kSubModeJitTraceBuild @ under construction?
+ bne .LOP_NEW_INSTANCE_jitCheck
+#else
+ beq common_exceptionThrown @ yes, handle the exception
+#endif
+.LOP_NEW_INSTANCE_end:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r3) @ vAA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we need to stop the trace building early.
+ * r0: new object
+ * r3: vAA
+ */
+.LOP_NEW_INSTANCE_jitCheck:
+ ldr r1, [r10] @ reload resolved class
+ cmp r1, #0 @ okay?
+ bne .LOP_NEW_INSTANCE_end @ yes, finish
+ mov r9, r0 @ preserve new object
+ mov r10, r3 @ preserve vAA
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self, pc)
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r9, r10) @ vAA<- new object
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+ /*
+ * Class initialization required.
+ *
+ * r0 holds class object
+ */
+.LOP_NEW_INSTANCE_needinit:
+ mov r9, r0 @ save r0
+ bl dvmInitClass @ initialize class
+ cmp r0, #0 @ check boolean result
+ mov r0, r9 @ restore r0
+ bne .LOP_NEW_INSTANCE_initialized @ success, continue
+ b common_exceptionThrown @ failed, deal with init exception
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * r1 holds BBBB
+ */
+.LOP_NEW_INSTANCE_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- resolved ClassObject ptr
+ cmp r0, #0 @ got null?
+ bne .LOP_NEW_INSTANCE_resolved @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+/* continuation for OP_NEW_ARRAY */
+
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ *
+ * r1 holds array length
+ * r2 holds class ref CCCC
+ */
+.LOP_NEW_ARRAY_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ mov r9, r1 @ r9<- length (save)
+ mov r1, r2 @ r1<- CCCC
+ mov r2, #0 @ r2<- false
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveClass @ r0<- call(clazz, ref)
+ cmp r0, #0 @ got null?
+ mov r1, r9 @ r1<- length (restore)
+ beq common_exceptionThrown @ yes, handle exception
+ @ fall through to OP_NEW_ARRAY_finish
+
+ /*
+ * Finish allocation.
+ *
+ * r0 holds class
+ * r1 holds array length
+ */
+.LOP_NEW_ARRAY_finish:
+ mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table
+ bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags)
+ cmp r0, #0 @ failed?
+ mov r2, rINST, lsr #8 @ r2<- A+
+ beq common_exceptionThrown @ yes, handle the exception
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ vA<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb rINST, [r3, #1] @ rINST<- descriptor[1]
+ .if 0
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp rINST, #'I' @ array of ints?
+ cmpne rINST, #'L' @ array of objects?
+ cmpne rINST, #'[' @ array of arrays?
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rSELF, #offThread_retval] @ retval.l <- new array
+ str rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 0
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ ldr r0, [rSELF, #offThread_retval] @ r0<- object
+ ldr r1, [rSELF, #offThread_retval+4] @ r1<- type
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ cmp r1, #'I' @ Is int array?
+ strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+ GOTO_OPCODE(ip) @ execute it
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+ ldr r0, .L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY
+ bl dvmThrowInternalError
+ b common_exceptionThrown
+
+ /*
+ * Ideally we'd only define this once, but depending on layout we can
+ * exceed the range of the load above.
+ */
+
+.L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY:
+ .word .LstrFilledNewArrayNotImpl
+
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+
+ /*
+ * On entry:
+ * r0 holds array class
+ * r10 holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+ ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor
+ mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags
+ ldrb rINST, [r3, #1] @ rINST<- descriptor[1]
+ .if 1
+ mov r1, r10 @ r1<- AA (length)
+ .else
+ mov r1, r10, lsr #4 @ r1<- B (length)
+ .endif
+ cmp rINST, #'I' @ array of ints?
+ cmpne rINST, #'L' @ array of objects?
+ cmpne rINST, #'[' @ array of arrays?
+ mov r9, r1 @ save length in r9
+ bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet
+ bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags)
+ cmp r0, #0 @ null return?
+ beq common_exceptionThrown @ alloc failed, handle exception
+
+ FETCH(r1, 2) @ r1<- FEDC or CCCC
+ str r0, [rSELF, #offThread_retval] @ retval.l <- new array
+ str rINST, [rSELF, #offThread_retval+4] @ retval.h <- type
+ add r0, r0, #offArrayObject_contents @ r0<- newArray->contents
+ subs r9, r9, #1 @ length--, check for neg
+ FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST
+ bmi 2f @ was zero, bail
+
+ @ copy values from registers into the array
+ @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA
+ .if 1
+ add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC]
+1: ldr r3, [r2], #4 @ r3<- *r2++
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .else
+ cmp r9, #4 @ length was initially 5?
+ and r2, r10, #15 @ r2<- A
+ bne 1f @ <= 4 args, branch
+ GET_VREG(r3, r2) @ r3<- vA
+ sub r9, r9, #1 @ count--
+ str r3, [r0, #16] @ contents[4] = vA
+1: and r2, r1, #15 @ r2<- F/E/D/C
+ GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC
+ mov r1, r1, lsr #4 @ r1<- next reg in low 4
+ subs r9, r9, #1 @ count--
+ str r3, [r0], #4 @ *contents++ = vX
+ bpl 1b
+ @ continue at 2
+ .endif
+
+2:
+ ldr r0, [rSELF, #offThread_retval] @ r0<- object
+ ldr r1, [rSELF, #offThread_retval+4] @ r1<- type
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ GET_INST_OPCODE(ip) @ ip<- opcode from rINST
+ cmp r1, #'I' @ Is int array?
+ strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head
+ GOTO_OPCODE(ip) @ execute it
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+ ldr r0, .L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY_RANGE
+ bl dvmThrowInternalError
+ b common_exceptionThrown
+
+ /*
+ * Ideally we'd only define this once, but depending on layout we can
+ * exceed the range of the load above.
+ */
+
+.L_strFilledNewArrayNotImpl_OP_FILLED_NEW_ARRAY_RANGE:
+ .word .LstrFilledNewArrayNotImpl
+
+/* continuation for OP_CMPL_FLOAT */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPL_FLOAT_gt_or_nan:
+ mov r1, r9 @ reverse order
+ mov r0, r10
+ bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPL_FLOAT_finish
+ mvn r1, #0 @ r1<- 1 or -1 for NaN
+ b .LOP_CMPL_FLOAT_finish
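+ /*
+ * For reference, the value being produced for cmpl/cmpg-float is roughly:
+ *
+ *     if      (vBB >  vCC) result = 1;
+ *     else if (vBB == vCC) result = 0;
+ *     else if (vBB <  vCC) result = -1;
+ *     else                 result = -1;   @ NaN: -1 for cmpl, +1 for cmpg
+ */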
+
+
+#if 0 /* "classic" form */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpeq @ r0<- (vBB == vCC)
+ cmp r0, #0 @ equal?
+ movne r1, #0 @ yes, result is 0
+ bne OP_CMPL_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmplt @ r0<- (vBB < vCC)
+ cmp r0, #0 @ less than?
+ b OP_CMPL_FLOAT_continue
+@%break
+
+OP_CMPL_FLOAT_continue:
+ mvnne r1, #0 @ yes, result is -1
+ bne OP_CMPL_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpgt @ r0<- (vBB > vCC)
+ cmp r0, #0 @ greater than?
+ beq OP_CMPL_FLOAT_nan @ no, must be NaN
+ mov r1, #1 @ yes, result is 1
+ @ fall through to _finish
+
+OP_CMPL_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * This is expected to be uncommon, so we double-branch (once to here,
+ * again back to _finish).
+ */
+OP_CMPL_FLOAT_nan:
+ mvn r1, #0 @ r1<- 1 or -1 for NaN
+ b OP_CMPL_FLOAT_finish
+
+#endif
+
+/* continuation for OP_CMPG_FLOAT */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPG_FLOAT_gt_or_nan:
+ mov r1, r9 @ reverse order
+ mov r0, r10
+ bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPG_FLOAT_finish
+ mov r1, #1 @ r1<- 1 or -1 for NaN
+ b .LOP_CMPG_FLOAT_finish
+
+
+#if 0 /* "classic" form */
+ FETCH(r0, 1) @ r0<- CCBB
+ and r2, r0, #255 @ r2<- BB
+ mov r3, r0, lsr #8 @ r3<- CC
+ GET_VREG(r9, r2) @ r9<- vBB
+ GET_VREG(r10, r3) @ r10<- vCC
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpeq @ r0<- (vBB == vCC)
+ cmp r0, #0 @ equal?
+ movne r1, #0 @ yes, result is 0
+ bne OP_CMPG_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmplt @ r0<- (vBB < vCC)
+ cmp r0, #0 @ less than?
+ b OP_CMPG_FLOAT_continue
+@%break
+
+OP_CMPG_FLOAT_continue:
+ mvnne r1, #0 @ yes, result is -1
+ bne OP_CMPG_FLOAT_finish
+ mov r0, r9 @ r0<- vBB
+ mov r1, r10 @ r1<- vCC
+ bl __aeabi_fcmpgt @ r0<- (vBB > vCC)
+ cmp r0, #0 @ greater than?
+ beq OP_CMPG_FLOAT_nan @ no, must be NaN
+ mov r1, #1 @ yes, result is 1
+ @ fall through to _finish
+
+OP_CMPG_FLOAT_finish:
+ mov r3, rINST, lsr #8 @ r3<- AA
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r3) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /*
+ * This is expected to be uncommon, so we double-branch (once to here,
+ * again back to _finish).
+ */
+OP_CMPG_FLOAT_nan:
+ mov r1, #1 @ r1<- 1 or -1 for NaN
+ b OP_CMPG_FLOAT_finish
+
+#endif
+
+/* continuation for OP_CMPL_DOUBLE */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPL_DOUBLE_gt_or_nan:
+ ldmia r10, {r0-r1} @ reverse order
+ ldmia r9, {r2-r3}
+ bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPL_DOUBLE_finish
+ mvn r1, #0 @ r1<- 1 or -1 for NaN
+ b .LOP_CMPL_DOUBLE_finish
+
+/* continuation for OP_CMPG_DOUBLE */
+
+ @ Test for NaN with a second comparison. EABI forbids testing bit
+ @ patterns, and we can't represent 0x7fc00000 in immediate form, so
+ @ make the library call.
+.LOP_CMPG_DOUBLE_gt_or_nan:
+ ldmia r10, {r0-r1} @ reverse order
+ ldmia r9, {r2-r3}
+ bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if <
+ @bleq common_abort
+ movcc r1, #1 @ (greater than) r1<- 1
+ bcc .LOP_CMPG_DOUBLE_finish
+ mov r1, #1 @ r1<- 1 or -1 for NaN
+ b .LOP_CMPG_DOUBLE_finish
+
+/* continuation for OP_CMP_LONG */
+
+.LOP_CMP_LONG_less:
+ mvn r1, #0 @ r1<- -1
+ @ We'd like to conditionally execute the next mov to avoid the branch, but
+ @ don't see a clean way to do it; instead, we just replicate the tail end.
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+.LOP_CMP_LONG_greater:
+ mov r1, #1 @ r1<- 1
+ @ fall through to _finish
+
+.LOP_CMP_LONG_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ SET_VREG(r1, r9) @ vAA<- r1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_AGET_WIDE */
+
+.LOP_AGET_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC]
+ add r9, rFP, r9, lsl #2 @ r9<- &fp[AA]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_APUT_WIDE */
+
+.LOP_APUT_WIDE_finish:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ strd r2, [r0, #offArrayObject_contents] @ vBB[vCC]<- r2/r3
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_APUT_OBJECT */
+ /*
+ * On entry:
+ * rINST = vBB (arrayObj)
+ * r9 = vAA (obj)
+ * r10 = offset into array (vBB + vCC * width)
+ */
+.LOP_APUT_OBJECT_finish:
+ cmp r9, #0 @ storing null reference?
+ beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks
+ ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz
+ ldr r1, [rINST, #offObject_clazz] @ r1<- arrayObj->clazz
+ bl dvmCanPutArrayElement @ test object type vs. array type
+ cmp r0, #0 @ okay?
+ beq .LOP_APUT_OBJECT_throw @ no
+ mov r1, rINST @ r1<- arrayObj
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldr r2, [rSELF, #offThread_cardTable] @ get biased CT base
+ add r10, #offArrayObject_contents @ r10<- pointer to slot
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r9, [r10] @ vBB[vCC]<- vAA
+ strb r2, [r2, r1, lsr #GC_CARD_SHIFT] @ mark card using object head
+ GOTO_OPCODE(ip) @ jump to next instruction
+.LOP_APUT_OBJECT_skip_check:
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ str r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA
+ GOTO_OPCODE(ip) @ jump to next instruction
+.LOP_APUT_OBJECT_throw:
+ @ The types don't match. We need to throw an ArrayStoreException.
+ ldr r0, [r9, #offObject_clazz]
+ ldr r1, [rINST, #offObject_clazz]
+ EXPORT_PC()
+ bl dvmThrowArrayStoreExceptionIncompatibleElement
+ b common_exceptionThrown
+
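+ /*
+ * In rough C terms the sequence above is (illustrative sketch only; the
+ * dvm* helpers are the ones the assembly already calls, "arrayContents"
+ * and "markCard" are hypothetical stand-ins):
+ *
+ *   if (obj != NULL &&
+ *           !dvmCanPutArrayElement(obj->clazz, arrayObj->clazz)) {
+ *       dvmThrowArrayStoreExceptionIncompatibleElement(obj->clazz,
+ *                                                      arrayObj->clazz);
+ *       // exception path
+ *   } else {
+ *       arrayContents[index] = obj;   // the actual aput
+ *       if (obj != NULL)
+ *           markCard(arrayObj);       // GC write barrier on the array object
+ *   }
+ */
+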
+/* continuation for OP_IGET */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_WIDE_finish:
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ .if 0
+ add r0, r9, r3 @ r0<- address of field
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok)
+ .endif
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_OBJECT_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BOOLEAN_finish:
+ @bl common_squeak1
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_BYTE_finish:
+ @bl common_squeak2
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_CHAR_finish:
+ @bl common_squeak3
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_SHORT_finish:
+ @bl common_squeak4
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ @ no-op @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_WIDE_finish:
+ mov r2, rINST, lsr #8 @ r2<- A+
+ cmp r9, #0 @ check object for null
+ and r2, r2, #15 @ r2<- A
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r2, {r0-r1} @ r0/r1<- fp[A]
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 0
+ add r2, r9, r3 @ r2<- target address
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
+/* continuation for OP_IPUT_OBJECT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_OBJECT_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (32 bits)<- r0
+ @ no-op
+ cmp r0, #0 @ stored a null reference?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BOOLEAN_finish:
+ @bl common_squeak1
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_BYTE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_BYTE_finish:
+ @bl common_squeak2
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_CHAR */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_CHAR_finish:
+ @bl common_squeak3
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_SHORT */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_SHORT_finish:
+ @bl common_squeak4
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ @ no-op @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ @ no-op
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SGET */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_finish
+
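+ /*
+ * This and the SGET/SPUT "resolve" continuations that follow all share
+ * the same shape; roughly, in C (illustrative sketch):
+ *
+ *   StaticField* sfield = dvmResolveStaticField(curMethod->clazz, ref);
+ *   if (sfield == NULL)
+ *       goto exceptionThrown;        // resolution failed, exception pending
+ *   // WITH_JIT: if a trace is being built, only include this instruction
+ *   // once the field is known to be fully resolved (common_verifyField)
+ *   goto finish;
+ */
+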
+/* continuation for OP_SGET_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r0.
+ */
+.LOP_SGET_WIDE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_WIDE_finish @ resume
+
+/* continuation for OP_SGET_OBJECT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_OBJECT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_OBJECT_finish
+
+/* continuation for OP_SGET_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_BOOLEAN_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_BOOLEAN_finish
+
+/* continuation for OP_SGET_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_BYTE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_BYTE_finish
+
+/* continuation for OP_SGET_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_CHAR_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_CHAR_finish
+
+/* continuation for OP_SGET_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_SHORT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_SHORT_finish
+
+/* continuation for OP_SPUT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_finish @ resume
+
+/* continuation for OP_SPUT_WIDE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r9: &fp[AA]
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r2.
+ */
+.LOP_SPUT_WIDE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ mov r2, r0 @ copy to r2
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_WIDE_finish @ resume
+
+/* continuation for OP_SPUT_OBJECT */
+
+
+.LOP_SPUT_OBJECT_end:
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ @ no-op
+ cmp r1, #0 @ stored a null object?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /* Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_OBJECT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_OBJECT_finish @ resume
+
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_BOOLEAN_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_BOOLEAN_finish @ resume
+
+/* continuation for OP_SPUT_BYTE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_BYTE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_BYTE_finish @ resume
+
+/* continuation for OP_SPUT_CHAR */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_CHAR_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_CHAR_finish @ resume
+
+/* continuation for OP_SPUT_SHORT */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_SHORT_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_SHORT_finish @ resume
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_continue:
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r9, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ (r0=method, r9="this")
+
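+ /*
+ * The virtual dispatch above is, in rough C terms (illustrative sketch):
+ *
+ *   Object* thisPtr = GET_VREG(firstArgReg);
+ *   if (thisPtr == NULL)
+ *       goto nullObject;
+ *   Method* method = thisPtr->clazz->vtable[baseMethod->methodIndex];
+ *   invoke(method, thisPtr);          // common_invokeMethodNoRange
+ */
+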
+/* continuation for OP_INVOKE_SUPER */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = method->clazz
+ */
+.LOP_INVOKE_SUPER_continue:
+ ldr r1, [r10, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodNoRange @ continue on
+
+.LOP_INVOKE_SUPER_resolve:
+ mov r0, r10 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
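+ /*
+ * Super dispatch differs from the virtual case only in which vtable is
+ * consulted and in the bounds check; roughly (illustrative sketch):
+ *
+ *   ClassObject* super = method->clazz->super;
+ *   if (baseMethod->methodIndex >= super->vtableCount)
+ *       goto noSuchMethod;            // .LOP_INVOKE_SUPER_nsm
+ *   invoke(super->vtable[baseMethod->methodIndex]);
+ */
+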
+/* continuation for OP_INVOKE_DIRECT */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_DIRECT_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+/* continuation for OP_INVOKE_STATIC */
+
+
+.LOP_INVOKE_STATIC_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we're actively building a trace. If so,
+ * we need to keep this instruction out of it.
+ * r10: &resolved_methodToCall
+ */
+ ldrh r2, [rSELF, #offThread_subMode]
+ beq common_exceptionThrown @ null, handle exception
+ ands r2, #kSubModeJitTraceBuild @ trace under construction?
+ beq common_invokeMethodNoRange @ no (r0=method, r9="this")
+ ldr r1, [r10] @ reload resolved method
+ cmp r1, #0 @ finished resolving?
+ bne common_invokeMethodNoRange @ yes (r0=method, r9="this")
+ mov r10, r0 @ preserve method
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self, pc)
+ mov r0, r10
+ b common_invokeMethodNoRange @ whew, finally!
+#else
+ bne common_invokeMethodNoRange @ (r0=method, r9="this")
+ b common_exceptionThrown @ yes, handle exception
+#endif
+
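+ /*
+ * The WITH_JIT block above keeps a freshly resolved invoke-static out of
+ * a trace that is still being built; roughly (illustrative sketch, the
+ * slot pointer name is hypothetical):
+ *
+ *   if ((self->subMode & kSubModeJitTraceBuild) &&
+ *           *pResolvedMethodSlot == NULL) {
+ *       dvmJitEndTraceSelect(self, pc);   // end trace before this invoke
+ *   }
+ *   invoke(method);                       // common_invokeMethodNoRange
+ */
+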
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+ GET_VREG(r9, r10) @ r9<- "this" ptr
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ cmp r9, #0 @ is "this" null?
+ beq common_errNullObject @ null "this", throw exception
+ ldr r3, [r9, #offObject_clazz] @ r3<- thisPtr->clazz
+ ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable
+ ldr r0, [r3, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ (r0=method, r9="this")
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+ /*
+ * At this point:
+ * r0 = resolved base method
+ * r10 = method->clazz
+ */
+.LOP_INVOKE_SUPER_RANGE_continue:
+ ldr r1, [r10, #offClassObject_super] @ r1<- method->clazz->super
+ ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex
+ ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount
+ EXPORT_PC() @ must export for invoke
+ cmp r2, r3 @ compare (methodIndex, vtableCount)
+ bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass
+ ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable
+ ldr r0, [r1, r2, lsl #2] @ r0<- vtable[methodIndex]
+ bl common_invokeMethodRange @ continue on
+
+.LOP_INVOKE_SUPER_RANGE_resolve:
+ mov r0, r10 @ r0<- method->clazz
+ mov r2, #METHOD_VIRTUAL @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * r0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+ ldr r1, [r0, #offMethod_name] @ r1<- method name
+ b common_errNoSuchMethod
+
+/* continuation for OP_INVOKE_DIRECT_RANGE */
+
+ /*
+ * On entry:
+ * r1 = reference (BBBB or CCCC)
+ * r10 = "this" register
+ */
+.LOP_INVOKE_DIRECT_RANGE_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_DIRECT @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+ bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue
+ b common_exceptionThrown @ yes, handle exception
+
+/* continuation for OP_INVOKE_STATIC_RANGE */
+
+
+.LOP_INVOKE_STATIC_RANGE_resolve:
+ ldr r3, [rSELF, #offThread_method] @ r3<- self->method
+ ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz
+ mov r2, #METHOD_STATIC @ resolver method type
+ bl dvmResolveMethod @ r0<- call(clazz, ref, flags)
+ cmp r0, #0 @ got null?
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we're actively building a trace. If so,
+ * we need to keep this instruction out of it.
+ * r10: &resolved_methodToCall
+ */
+ ldrh r2, [rSELF, #offThread_subMode]
+ beq common_exceptionThrown @ null, handle exception
+ ands r2, #kSubModeJitTraceBuild @ trace under construction?
+ beq common_invokeMethodRange @ no (r0=method, r9="this")
+ ldr r1, [r10] @ reload resolved method
+ cmp r1, #0 @ finished resolving?
+ bne common_invokeMethodRange @ yes (r0=method, r9="this")
+ mov r10, r0 @ preserve method
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self, pc)
+ mov r0, r10
+ b common_invokeMethodRange @ whew, finally!
+#else
+ bne common_invokeMethodRange @ (r0=method, r9="this")
+ b common_exceptionThrown @ yes, handle exception
+#endif
+
+/* continuation for OP_FLOAT_TO_LONG */
+/*
+ * Convert the float in r0 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+f2l_doconv:
+ stmfd sp!, {r4, lr}
+ mov r1, #0x5f000000 @ (float)maxlong
+ mov r4, r0
+ bl __aeabi_fcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, #0xdf000000 @ (float)minlong
+ bl __aeabi_fcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ ldmnefd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ mov r1, r4
+ bl __aeabi_fcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ ldmeqfd sp!, {r4, pc}
+
+ mov r0, r4 @ recover arg
+ bl __aeabi_f2lz @ convert float to long
+ ldmfd sp!, {r4, pc}
+
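+ /*
+ * The clamping above corresponds roughly to this C logic (illustrative
+ * sketch; "f2l" is a hypothetical name, the constants are +/- 2^63):
+ *
+ *   #include <limits.h>
+ *   long long f2l(float f) {
+ *       if (f >= 9223372036854775808.0f)  return LLONG_MAX;
+ *       if (f <= -9223372036854775808.0f) return LLONG_MIN;
+ *       if (f != f)                       return 0;   // NaN
+ *       return (long long)f;                          // ordinary case
+ *   }
+ *
+ * The double-to-long conversion below follows the same pattern.
+ */
+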
+/* continuation for OP_DOUBLE_TO_LONG */
+/*
+ * Convert the double in r0/r1 to a long in r0/r1.
+ *
+ * We have to clip values to long min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ */
+d2l_doconv:
+ stmfd sp!, {r4, r5, lr} @ save regs
+ mov r3, #0x43000000 @ maxlong, as a double (high word)
+ add r3, #0x00e00000 @ 0x43e00000
+ mov r2, #0 @ maxlong, as a double (low word)
+ sub sp, sp, #4 @ align for EABI
+ mov r4, r0 @ save a copy of r0
+ mov r5, r1 @ and r1
+ bl __aeabi_dcmpge @ is arg >= maxlong?
+ cmp r0, #0 @ nonzero == yes
+ mvnne r0, #0 @ return maxlong (7fffffffffffffff)
+ mvnne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r3, #0xc3000000 @ minlong, as a double (high word)
+ add r3, #0x00e00000 @ 0xc3e00000
+ mov r2, #0 @ minlong, as a double (low word)
+ bl __aeabi_dcmple @ is arg <= minlong?
+ cmp r0, #0 @ nonzero == yes
+ movne r0, #0 @ return minlong (8000000000000000)
+ movne r1, #0x80000000
+ bne 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ mov r2, r4 @ compare against self
+ mov r3, r5
+ bl __aeabi_dcmpeq @ is arg == self?
+ cmp r0, #0 @ zero == no
+ moveq r1, #0 @ return zero for NaN
+ beq 1f
+
+ mov r0, r4 @ recover arg
+ mov r1, r5
+ bl __aeabi_d2lz @ convert double to long
+
+1:
+ add sp, sp, #4
+ ldmfd sp!, {r4, r5, pc}
+
+/* continuation for OP_MUL_LONG */
+
+.LOP_MUL_LONG_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHL_LONG */
+
+.LOP_SHL_LONG_finish:
+ mov r0, r0, asl r2 @ r0<- r0 << r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHR_LONG */
+
+.LOP_SHR_LONG_finish:
+ mov r1, r1, asr r2 @ r1<- r1 >> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_USHR_LONG */
+
+.LOP_USHR_LONG_finish:
+ mov r1, r1, lsr r2 @ r1<- r1 >>> r2
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHL_LONG_2ADDR */
+
+.LOP_SHL_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SHR_LONG_2ADDR */
+
+.LOP_SHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_USHR_LONG_2ADDR */
+
+.LOP_USHR_LONG_2ADDR_finish:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_VOLATILE_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_VOLATILE_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0
+ SMP_DMB
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SGET_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_VOLATILE_finish
+
+/* continuation for OP_SPUT_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_VOLATILE_finish @ resume
+
+/* continuation for OP_IGET_OBJECT_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_OBJECT_VOLATILE_finish:
+ @bl common_squeak0
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits)
+ SMP_DMB @ acquiring load
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SET_VREG(r0, r2) @ fp[A]<- r0
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IGET_WIDE_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IGET_WIDE_VOLATILE_finish:
+ cmp r9, #0 @ check object for null
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ beq common_errNullObject @ object was null
+ .if 1
+ add r0, r9, r3 @ r0<- address of field
+ bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field
+ .else
+ ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok)
+ .endif
+ mov r2, rINST, lsr #8 @ r2<- A+
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ and r2, r2, #15 @ r2<- A
+ add r3, rFP, r2, lsl #2 @ r3<- &fp[A]
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ stmia r3, {r0-r1} @ fp[A]<- r0/r1
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_IPUT_WIDE_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_WIDE_VOLATILE_finish:
+ mov r2, rINST, lsr #8 @ r2<- A+
+ cmp r9, #0 @ check object for null
+ and r2, r2, #15 @ r2<- A
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ add r2, rFP, r2, lsl #2 @ r2<- &fp[A]
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ ldmia r2, {r0-r1} @ r0/r1<- fp[A]
+ GET_INST_OPCODE(r10) @ extract opcode from rINST
+ .if 1
+ add r2, r9, r3 @ r2<- target address
+ bl dvmQuasiAtomicSwap64Sync @ stores r0/r1 into addr r2
+ .else
+ strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1
+ .endif
+ GOTO_OPCODE(r10) @ jump to next instruction
+
+/* continuation for OP_SGET_WIDE_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r0.
+ */
+.LOP_SGET_WIDE_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_WIDE_VOLATILE_finish @ resume
+
+/* continuation for OP_SPUT_WIDE_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r9: &fp[AA]
+ * r10: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in r2.
+ */
+.LOP_SPUT_WIDE_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ mov r2, r0 @ copy to r2
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_WIDE_VOLATILE_finish @ resume
+
+/* continuation for OP_EXECUTE_INLINE */
+
+ /*
+ * Extract args, call function.
+ * r0 = #of args (0-4)
+ * r10 = call index
+ * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
+ *
+ * Other ideas:
+ * - Use a jump table from the main piece to jump directly into the
+ * AND/LDR pairs. Costs a data load, saves a branch.
+ * - Have five separate pieces that do the loading, so we can work the
+ * interleave a little better. Increases code size.
+ */
+.LOP_EXECUTE_INLINE_continue:
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(rINST, 2) @ rINST<- FEDC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: and ip, rINST, #0xf000 @ isolate F
+ ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2)
+3: and ip, rINST, #0x0f00 @ isolate E
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vE
+2: and ip, rINST, #0x00f0 @ isolate D
+ ldr r1, [rFP, ip, lsr #2] @ r1<- vD
+1: and ip, rINST, #0x000f @ isolate C
+ ldr r0, [rFP, ip, lsl #2] @ r0<- vC
+0:
+ ldr rINST, .LOP_EXECUTE_INLINE_table @ table of InlineOperation
+ ldr pc, [rINST, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
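+ /*
+ * The computed goto above loads 0-4 arguments by jumping into the middle
+ * of the load sequence; in C it is a fall-through switch (illustrative
+ * sketch, local names are hypothetical):
+ *
+ *   switch (argCount) {
+ *   case 4: arg3 = fp[vF];   // fall through
+ *   case 3: arg2 = fp[vE];   // fall through
+ *   case 2: arg1 = fp[vD];   // fall through
+ *   case 1: arg0 = fp[vC];   // fall through
+ *   case 0: break;
+ *   }
+ *   gDvmInlineOpsTable[opIndex].func(arg0, arg1, arg2, arg3, &self->retval);
+ */
+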
+ /*
+ * We're debugging or profiling.
+ * r10: opIndex
+ */
+.LOP_EXECUTE_INLINE_debugmode:
+ mov r0, r10
+ bl dvmResolveInlineNative
+ cmp r0, #0 @ did it resolve?
+ beq .LOP_EXECUTE_INLINE_resume @ no, just move on
+ mov r9, r0 @ remember method
+ mov r1, rSELF
+ bl dvmFastMethodTraceEnter @ (method, self)
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #12 @ r0<- B
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_continue @ make call; will return after
+ mov rINST, r0 @ save result of inline
+ add sp, sp, #8 @ pop stack
+ mov r0, r9 @ r0<- method
+ mov r1, rSELF
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp rINST, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+
+.LOP_EXECUTE_INLINE_table:
+ .word gDvmInlineOpsTable
+
+/* continuation for OP_EXECUTE_INLINE_RANGE */
+
+ /*
+ * Extract args, call function.
+ * r0 = #of args (0-4)
+ * r10 = call index
+ * lr = return addr, above [DO NOT bl out of here w/o preserving LR]
+ */
+.LOP_EXECUTE_INLINE_RANGE_continue:
+ rsb r0, r0, #4 @ r0<- 4-r0
+ FETCH(r9, 2) @ r9<- CCCC
+ add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
+4: add ip, r9, #3 @ base+3
+ GET_VREG(r3, ip) @ r3<- vBase[3]
+3: add ip, r9, #2 @ base+2
+ GET_VREG(r2, ip) @ r2<- vBase[2]
+2: add ip, r9, #1 @ base+1
+ GET_VREG(r1, ip) @ r1<- vBase[1]
+1: add ip, r9, #0 @ (nop)
+ GET_VREG(r0, ip) @ r0<- vBase[0]
+0:
+ ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation
+ ldr pc, [r9, r10, lsl #4] @ sizeof=16, "func" is first entry
+ @ (not reached)
+
+
+ /*
+ * We're debugging or profiling.
+ * r10: opIndex
+ */
+.LOP_EXECUTE_INLINE_RANGE_debugmode:
+ mov r0, r10
+ bl dvmResolveInlineNative
+ cmp r0, #0 @ did it resolve?
+ beq .LOP_EXECUTE_INLINE_RANGE_resume @ no, just move on
+ mov r9, r0 @ remember method
+ mov r1, rSELF
+ bl dvmFastMethodTraceEnter @ (method, self)
+ add r1, rSELF, #offThread_retval @ r1<- &self->retval
+ sub sp, sp, #8 @ make room for arg, +64 bit align
+ mov r0, rINST, lsr #8 @ r0<- B
+ mov rINST, r9 @ rINST<- method
+ str r1, [sp] @ push &self->retval
+ bl .LOP_EXECUTE_INLINE_RANGE_continue @ make call; will return after
+ mov r9, r0 @ save result of inline
+ add sp, sp, #8 @ pop stack
+ mov r0, rINST @ r0<- method
+ mov r1, rSELF
+ bl dvmFastNativeMethodTraceExit @ (method, self)
+ cmp r9, #0 @ test boolean result of inline
+ beq common_exceptionThrown @ returned false, handle exception
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+
+
+
+.LOP_EXECUTE_INLINE_RANGE_table:
+ .word gDvmInlineOpsTable
+
+
+/* continuation for OP_INVOKE_OBJECT_INIT_RANGE */
+
+.LOP_INVOKE_OBJECT_INIT_RANGE_setFinal:
+ EXPORT_PC() @ can throw
+ bl dvmSetFinalizable @ call dvmSetFinalizable(obj)
+ ldr r0, [rSELF, #offThread_exception] @ r0<- self->exception
+ cmp r0, #0 @ exception pending?
+ bne common_exceptionThrown @ yes, handle it
+ b .LOP_INVOKE_OBJECT_INIT_RANGE_finish
+
+ /*
+ * A debugger is attached, so we need to go ahead and do
+ * this. For simplicity, we'll just jump directly to the
+ * corresponding handler. Note that we can't use
+ * rIBASE here because it may be in single-step mode.
+ * Load the primary table base directly.
+ */
+.LOP_INVOKE_OBJECT_INIT_RANGE_debugger:
+ ldr r1, [rSELF, #offThread_mainHandlerTable]
+ mov ip, #OP_INVOKE_DIRECT_RANGE
+ GOTO_OPCODE_BASE(r1,ip) @ execute it
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE */
+
+ /*
+ * Currently:
+ * r0 holds resolved field
+ * r9 holds object
+ */
+.LOP_IPUT_OBJECT_VOLATILE_finish:
+ @bl common_squeak0
+ mov r1, rINST, lsr #8 @ r1<- A+
+ ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field
+ and r1, r1, #15 @ r1<- A
+ cmp r9, #0 @ check object for null
+ GET_VREG(r0, r1) @ r0<- fp[A]
+ ldr r2, [rSELF, #offThread_cardTable] @ r2<- card table base
+ beq common_errNullObject @ object was null
+ FETCH_ADVANCE_INST(2) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ SMP_DMB_ST @ releasing store
+ str r0, [r9, r3] @ obj.field (32 bits)<- r0
+ SMP_DMB
+ cmp r0, #0 @ stored a null reference?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/* continuation for OP_SGET_OBJECT_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SGET_OBJECT_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SGET_OBJECT_VOLATILE_finish
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE */
+
+
+.LOP_SPUT_OBJECT_VOLATILE_end:
+ str r1, [r0, #offStaticField_value] @ field<- vAA
+ SMP_DMB
+ cmp r1, #0 @ stored a null object?
+ strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ /* Continuation if the field has not yet been resolved.
+ * r1: BBBB field ref
+ * r10: dvmDex->pResFields
+ */
+.LOP_SPUT_OBJECT_VOLATILE_resolve:
+ ldr r2, [rSELF, #offThread_method] @ r2<- current method
+#if defined(WITH_JIT)
+ add r10, r10, r1, lsl #2 @ r10<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() @ resolve() could throw, so export now
+ ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz
+ bl dvmResolveStaticField @ r0<- resolved StaticField ptr
+ cmp r0, #0 @ success?
+ beq common_exceptionThrown @ no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ bl common_verifyField
+#endif
+ b .LOP_SPUT_OBJECT_VOLATILE_finish @ resume
+
+
+ .size dvmAsmSisterStart, .-dvmAsmSisterStart
+ .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+
+ .global dvmAsmAltInstructionStart
+ .type dvmAsmAltInstructionStart, %function
+ .text
+
+dvmAsmAltInstructionStart = .L_ALT_OP_NOP
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (0 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
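+ /*
+ * Every alternate stub in this table reduces to roughly the following C
+ * logic (illustrative sketch):
+ *
+ *   ibase = self->curHandlerTable;       // always refresh rIBASE
+ *   if (self->breakFlags == 0)
+ *       goto realHandler;                // nothing pending, run as normal
+ *   dvmCheckBefore(pc, fp, self);        // tail call; does not return here
+ */
+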
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (1 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (2 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (3 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (4 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (5 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (6 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (7 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (8 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (9 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (10 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (11 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (12 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (13 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (14 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (15 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (16 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (17 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (18 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (19 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (20 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (21 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (22 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (23 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (24 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (25 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (26 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (27 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (28 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (29 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (30 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (31 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (32 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (33 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (34 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (35 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (36 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (37 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (38 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (39 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (40 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (41 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (42 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (43 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (44 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (45 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (46 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (47 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (48 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (49 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (50 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (51 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (52 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (53 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (54 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (55 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (56 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (57 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (58 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (59 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (60 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (61 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (62 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (63 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (64 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (65 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (66 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (67 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (68 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (69 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (70 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (71 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (72 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (73 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (74 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (75 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (76 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (77 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (78 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (79 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (80 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (81 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (82 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (83 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (84 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (85 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (86 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (87 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (88 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (89 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (90 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (91 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (92 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (93 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (94 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (95 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (96 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (97 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (98 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (99 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (100 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (101 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (102 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (103 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (104 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (105 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (106 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (107 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (108 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (109 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (110 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (111 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (112 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (113 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (114 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (115 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (116 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (117 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (118 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (119 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (120 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (121 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (122 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (123 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (124 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (125 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (126 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (127 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (128 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (129 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (130 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (131 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (132 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (133 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (134 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (135 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (136 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (137 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (138 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (139 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (140 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (141 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (142 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (143 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (144 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (145 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (146 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (147 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (148 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (149 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (150 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (151 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (152 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (153 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (154 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (155 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (156 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (157 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (158 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (159 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (160 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (161 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (162 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (163 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (164 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (165 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (166 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (167 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (168 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (169 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (170 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (171 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (172 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (173 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (174 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (175 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (176 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (177 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (178 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (179 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (180 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (181 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (182 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (183 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (184 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (185 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (186 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (187 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (188 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (189 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (190 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (191 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (192 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (193 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (194 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (195 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (196 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (197 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (198 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (199 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (200 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (201 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (202 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (203 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (204 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (205 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (206 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (207 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (208 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (209 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (210 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (211 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (212 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (213 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (214 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (215 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (216 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (217 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (218 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (219 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (220 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (221 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (222 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (223 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (224 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (225 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (226 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (227 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (228 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (229 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (230 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (231 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (232 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (233 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (234 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (235 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (236 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (237 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (238 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (239 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (240 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (241 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (242 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (243 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (244 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (245 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (246 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (247 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (248 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (249 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (250 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (251 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (252 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (253 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (254 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+/* ------------------------------ */
+ .balign 64
+.L_ALT_OP_UNUSED_FF: /* 0xff */
+/* File: armv5te/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ ldrb r3, [rSELF, #offThread_breakFlags]
+ adrl lr, dvmAsmInstructionStart + (255 * 64)
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ cmp r3, #0
+ bxeq lr @ nothing to do - jump to real handler
+ EXPORT_PC()
+ mov r0, rPC @ arg0
+ mov r1, rFP @ arg1
+ mov r2, rSELF @ arg2
+ b dvmCheckBefore @ (dPC,dFP,self) tail call
+
+ .balign 64
+ .size dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+ .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
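
Every alt stub in the table above encodes the same control flow. As a rough C sketch only (the Thread fields mirror the offThread_* offsets the assembly reads; HANDLER_STRIDE, the extern prototypes, and the function itself are illustrative assumptions, not the real implementation):

    #define HANDLER_STRIDE 64              /* assumed: each handler is 64-byte aligned */

    typedef struct Thread Thread;
    struct Thread {
        unsigned char breakFlags;          /* offThread_breakFlags */
        void        **curHandlerTable;     /* offThread_curHandlerTable (rIBASE source) */
    };

    /* Entry points named in the assembly; prototypes simplified for this sketch. */
    extern char dvmAsmInstructionStart[];
    extern void dvmCheckBefore(const unsigned short *dPC, unsigned *dFP, Thread *self);

    static void alt_stub(int opcode, const unsigned short *dPC, unsigned *dFP,
                         Thread *self)
    {
        /* lr <- real handler; rIBASE is always refreshed from curHandlerTable here. */
        void (*realHandler)(void) =
            (void (*)(void))(dvmAsmInstructionStart + opcode * HANDLER_STRIDE);

        if (self->breakFlags == 0) {
            realHandler();                  /* nothing pending: run the real handler */
        } else {
            dvmCheckBefore(dPC, dFP, self); /* done as a tail call in the assembly */
        }
    }
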
+/* File: armv5te/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+#if defined(WITH_JIT)
+
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * "longjmp" to a translation after single-stepping. Before returning
+ * to the translation, we must save state for self-verification.
+ */
+ .global dvmJitResumeTranslation @ (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+ mov rSELF, r0 @ restore self
+ mov rPC, r1 @ restore Dalvik pc
+ mov rFP, r2 @ restore Dalvik fp
+ ldr r10, [rSELF,#offThread_jitResumeNPC] @ resume address
+ mov r2, #0
+ str r2, [rSELF,#offThread_jitResumeNPC] @ reset resume address
+ ldr sp, [rSELF,#offThread_jitResumeNSP] @ cut back native stack
+ b jitSVShadowRunStart @ resume as if cache hit
+ @ expects resume addr in r10
+
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov r2,#kSVSPunt @ r2<- interpreter entry point
+ mov r3, #0
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov rPC, r0 @ set up dalvik pc
+ EXPORT_PC()
+ str lr, [rSELF,#offThread_jitResumeNPC]
+ str sp, [rSELF,#offThread_jitResumeNSP]
+ str r1, [rSELF,#offThread_jitResumeDPC]
+ mov r2,#kSVSSingleStep @ r2<- interpreter entry point
+ b jitSVShadowRunEnd @ doesn't return
+
+
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoProfile @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ ldr r0,[lr, #-1] @ pass our target PC
+ mov r2,#kSVSTraceSelect @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+ ldr r0,[lr, #-1] @ pass our target PC
+ mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr r0,[lr, #-1] @ pass our target PC
+ mov r2,#kSVSNormal @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ mov r0,rPC @ pass our target PC
+ mov r2,#kSVSNoChain @ r2<- interpreter entry point
+ mov r3, #0 @ 0 means !inJitCodeCache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ b jitSVShadowRunEnd @ doesn't return
+#else
+
+/*
+ * "longjmp" to a translation after single-stepping.
+ */
+ .global dvmJitResumeTranslation @ (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+ mov rSELF, r0 @ restore self
+ mov rPC, r1 @ restore Dalvik pc
+ mov rFP, r2 @ restore Dalvik fp
+ ldr r0, [rSELF,#offThread_jitResumeNPC]
+ mov r2, #0
+ str r2, [rSELF,#offThread_jitResumeNPC] @ reset resume address
+ ldr sp, [rSELF,#offThread_jitResumeNSP] @ cut back native stack
+ bx r0 @ resume translation
+
+/*
+ * Return from the translation cache to the interpreter when the compiler is
+ * having issues translating/executing a Dalvik instruction. We have to skip
+ * the code cache lookup, otherwise it is possible to bounce indefinitely
+ * between the interpreter and the code cache if the instruction that fails
+ * to be compiled happens to be at a trace start.
+ */
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ mov rPC, r0
+#if defined(WITH_JIT_TUNING)
+ mov r0,lr
+ bl dvmBumpPunt;
+#endif
+ EXPORT_PC()
+ mov r0, #0
+ str r0, [rSELF, #offThread_inJitCodeCache] @ Back to the interp land
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+
+/*
+ * Return to the interpreter to handle a single instruction.
+ * We'll use the normal single-stepping mechanism via interpBreak,
+ * but also save the native pc of the resume point in the translation
+ * and the native sp so that we can later do the equivalent of a
+ * longjmp() to resume.
+ * On entry:
+ * dPC <= Dalvik PC of instruction to interpret
+ * lr <= resume point in translation
+ * r1 <= Dalvik PC of next instruction
+ */
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ mov rPC, r0 @ set up dalvik pc
+ EXPORT_PC()
+ str lr, [rSELF,#offThread_jitResumeNPC]
+ str sp, [rSELF,#offThread_jitResumeNSP]
+ str r1, [rSELF,#offThread_jitResumeDPC]
+ mov r1, #1
+ str r1, [rSELF,#offThread_singleStepCount] @ just step once
+ mov r0, rSELF
+ mov r1, #kSubModeCountedStep
+ bl dvmEnableSubMode @ (self, newMode)
+ ldr rIBASE, [rSELF,#offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+
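
The comment above describes how dvmJitToInterpSingleStep parks just enough state for a later longjmp-style resume. A loose C rendering of what gets recorded (the struct and helper name are hypothetical; only the field names follow the offThread_* offsets):

    typedef struct {
        void                 *jitResumeNPC;   /* native resume point (saved lr)   */
        void                 *jitResumeNSP;   /* native sp to cut back to         */
        const unsigned short *jitResumeDPC;   /* Dalvik pc of the next instruction */
        int                   singleStepCount;
    } ResumeState;                            /* simplified slice of Thread       */

    static void record_single_step(ResumeState *t, void *nativeResumePC,
                                   void *nativeSP, const unsigned short *nextDPC)
    {
        t->jitResumeNPC    = nativeResumePC;  /* str lr, [rSELF,#offThread_jitResumeNPC] */
        t->jitResumeNSP    = nativeSP;        /* str sp, [rSELF,#offThread_jitResumeNSP] */
        t->jitResumeDPC    = nextDPC;         /* str r1, [rSELF,#offThread_jitResumeDPC] */
        t->singleStepCount = 1;               /* interpret exactly one instruction       */
        /* dvmEnableSubMode(self, kSubModeCountedStep) is then called before
         * falling back into the interpreter loop. */
    }
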
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used for callees.
+ */
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNoChain
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0 @ !0 means translation exists
+ bxne r0 @ continue native execution if so
+ b 2f @ branch over to use the interpreter
+
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used following
+ * invokes.
+ */
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ ldr rPC,[lr, #-1] @ get our target PC
+ add rINST,lr,#-5 @ save start of chain branch
+ add rINST, #-4 @ .. which is 9 bytes back
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ cmp r0,#0
+ beq 2f
+ mov r1,rINST
+ bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0 @ successful chain?
+ bxne r0 @ continue native execution
+ b toInterpreter @ didn't chain - resume with interpreter
+
+/* No translation, so request one if profiling isn't disabled */
+2:
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ FETCH_INST()
+ cmp r0, #0
+ movne r2,#kJitTSelectRequestHot @ ask for trace selection
+ bne common_selectTrace
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+
+/*
+ * Return from the translation cache to the interpreter.
+ * The return was done with a BLX from thumb mode, and
+ * the following 32-bit word contains the target rPC value.
+ * Note that lr (r14) will have its low-order bit set to denote
+ * its thumb-mode origin.
+ *
+ * We'll need to stash our lr origin away, recover the new
+ * target and then check to see if there is a translation available
+ * for our new target. If so, we do a translation chain and
+ * go back to native execution. Otherwise, it's back to the
+ * interpreter (after treating this entry as a potential
+ * trace start).
+ */
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ ldr rPC,[lr, #-1] @ get our target PC
+ add rINST,lr,#-5 @ save start of chain branch
+ add rINST,#-4 @ .. which is 9 bytes back
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNormal
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ cmp r0,#0
+ beq toInterpreter @ go if not, otherwise do chain
+ mov r1,rINST
+ bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr)
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0 @ successful chain?
+ bxne r0 @ continue native execution
+ b toInterpreter @ didn't chain - resume with interpreter
+
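
The dvmJitToInterpNormal comment above spells out the chain-or-interpret decision made on each return from the translation cache. A loose C sketch of that decision, treating the lr-relative pointer math as already done (dvmJitGetTraceAddrThread and dvmJitChain are the entry points named in the assembly, with prototypes simplified here; ThreadSlice is an illustrative stand-in for Thread):

    typedef struct { void *inJitCodeCache; } ThreadSlice;   /* illustrative */

    extern void *dvmJitGetTraceAddrThread(const unsigned short *dPC, ThreadSlice *self);
    extern void *dvmJitChain(void *codeAddr, void *chainCellAddr);

    /* Returns the native address to jump to, or NULL to fall back to the interpreter. */
    static void *chain_or_interpret(const unsigned short *targetDPC,
                                    void *chainCellAddr, ThreadSlice *self)
    {
        void *code = dvmJitGetTraceAddrThread(targetDPC, self);
        self->inJitCodeCache = code;             /* non-NULL means we stay in the cache */
        if (code == NULL)
            return NULL;                         /* no translation: go to toInterpreter */
        /* Patch the chain cell so the next exit branches straight to the translation. */
        return dvmJitChain(code, chainCellAddr); /* NULL if the chain attempt failed    */
    }
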
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNoChain
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0
+ bxne r0 @ continue native execution if so
+ EXPORT_PC()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+#if defined(WITH_JIT_TUNING)
+ bl dvmBumpNoChain
+#endif
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0
+ bxne r0 @ continue native execution if so
+#endif
+
+/*
+ * No translation, restore interpreter regs and start interpreting.
+ * rSELF & rFP were preserved in the translated code, and rPC has
+ * already been restored by the time we get here. We'll need to set
+ * up rIBASE & rINST, and load the address of the JIT profiling table into r0.
+ */
+toInterpreter:
+ EXPORT_PC()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ @ NOTE: intended fallthrough
+
+/*
+ * Similar to common_updateProfile, but tests for null pJitProfTable
+ * r0 holds pJitProfTable, rINST is loaded, rPC is current, and
+ * rIBASE has been recently refreshed.
+ */
+common_testUpdateProfile:
+ cmp r0, #0 @ JIT switched off?
+ beq 4f @ return to interp if so
+
+/*
+ * Common code to update potential trace start counter, and initiate
+ * a trace-build if appropriate.
+ * On entry here:
+ * r0 <= pJitProfTable (verified non-NULL)
+ * rPC <= Dalvik PC
+ * rINST <= next instruction
+ */
+common_updateProfile:
+ eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function
+ lsl r3,r3,#(32 - JIT_PROF_SIZE_LOG_2) @ shift out excess bits
+ ldrb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter
+ GET_INST_OPCODE(ip)
+ subs r1,r1,#1 @ decrement counter
+ strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it
+ GOTO_OPCODE_IFNE(ip) @ not at threshold: go to handler; otherwise fall through
+
+ /* Looks good, reset the counter */
+ ldr r1, [rSELF, #offThread_jitThreshold]
+ strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter
+ EXPORT_PC()
+ mov r0,rPC
+ mov r1,rSELF
+ bl dvmJitGetTraceAddrThread @ (pc, self)
+ str r0, [rSELF, #offThread_inJitCodeCache] @ set the inJitCodeCache flag
+ mov r1, rPC @ arg1 of translation may need this
+ mov lr, #0 @ in case target is HANDLER_INTERPRET
+ cmp r0,#0
+#if !defined(WITH_SELF_VERIFICATION)
+ bxne r0 @ jump to the translation
+ mov r2,#kJitTSelectRequest @ ask for trace selection
+ @ fall-through to common_selectTrace
+#else
+ moveq r2,#kJitTSelectRequest @ ask for trace selection
+ beq common_selectTrace
+ /*
+ * At this point, we have a target translation. However, if
+ * that translation is actually the interpret-only pseudo-translation
+ * we want to treat it the same as no translation.
+ */
+ mov r10, r0 @ save target
+ bl dvmCompilerGetInterpretTemplate
+ cmp r0, r10 @ special case?
+ bne jitSVShadowRunStart @ set up self verification shadow space
+ @ Need to clear the inJitCodeCache flag
+ mov r3, #0 @ 0 means not in the JIT code cache
+ str r3, [rSELF, #offThread_inJitCodeCache] @ back to the interp land
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+ /* no return */
+#endif
+
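
The comments around common_updateProfile describe a cheap hash of the Dalvik PC indexing a table of byte counters that count down toward a trace-selection request. A small C equivalent of that counter update, with JIT_PROF_SIZE_LOG_2 given an assumed value and the helper name invented for illustration:

    #define JIT_PROF_SIZE_LOG_2 9                     /* assumed size: 512 counters */

    /* Returns 1 when this pc has become hot enough to request trace selection. */
    static int profile_counter_hit(unsigned char *profTable, unsigned dalvikPC,
                                   unsigned char jitThreshold)
    {
        unsigned hash = dalvikPC ^ (dalvikPC >> 12);  /* eor r3, rPC, rPC, lsr #12      */
        hash &= (1u << JIT_PROF_SIZE_LOG_2) - 1;      /* same effect as the lsl/lsr pair */
        if (--profTable[hash] != 0)
            return 0;                                 /* not at threshold: keep going    */
        profTable[hash] = jitThreshold;               /* reset, then select a trace      */
        return 1;
    }
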
+/*
+ * On entry:
+ * r2 is jit state.
+ */
+common_selectTrace:
+ ldrh r0,[rSELF,#offThread_subMode]
+ ands r0, #(kSubModeJitTraceBuild | kSubModeJitSV)
+ bne 3f @ already doing JIT work, continue
+ str r2,[rSELF,#offThread_jitState]
+ mov r0, rSELF
+/*
+ * Call out to validate trace-building request. If successful,
+ * rIBASE will be swapped to send us into single-stepping trace
+ * building mode, so we need to refresh before we continue.
+ */
+ EXPORT_PC()
+ SAVE_PC_FP_TO_SELF() @ copy of pc/fp to Thread
+ bl dvmJitCheckTraceRequest
+3:
+ FETCH_INST()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+4:
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip)
+ /* no return */
+#endif
+
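
The common_selectTrace block just above only hands a request to dvmJitCheckTraceRequest when no JIT sub-mode work is already in flight. A C paraphrase of that gate, with the mode bit values assumed rather than taken from the headers and the helper name invented for illustration:

    enum {
        kSubModeJitTraceBuild = 1 << 3,   /* assumed values, for illustration only */
        kSubModeJitSV         = 1 << 4
    };

    /* Returns 1 when the caller should go on to call dvmJitCheckTraceRequest(self). */
    static int maybe_request_trace(unsigned short subMode, int *jitState, int request)
    {
        if (subMode & (kSubModeJitTraceBuild | kSubModeJitSV))
            return 0;                     /* already doing JIT work: just continue */
        *jitState = request;              /* e.g. kJitTSelectRequest               */
        return 1;
    }
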
+#if defined(WITH_SELF_VERIFICATION)
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ * On entry:
+ * rPC, rFP, rSELF: the values that they should contain
+ * r10: the address of the target translation.
+ */
+jitSVShadowRunStart:
+ mov r0,rPC @ r0<- program counter
+ mov r1,rFP @ r1<- frame pointer
+ mov r2,rSELF @ r2<- self (Thread) pointer
+ mov r3,r10 @ r3<- target translation
+ bl dvmSelfVerificationSaveState @ save registers to shadow space
+ ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space
+ bx r10 @ jump to the translation
+
+/*
+ * Restore PC, registers, and interpreter state to original values
+ * before jumping back to the interpreter.
+ * On entry:
+ * r0: dPC
+ * r2: self verification state
+ */
+jitSVShadowRunEnd:
+ mov r1,rFP @ pass ending fp
+ mov r3,rSELF @ pass self ptr for convenience
+ bl dvmSelfVerificationRestoreState @ restore pc and fp values
+ LOAD_PC_FP_FROM_SELF() @ restore pc, fp
+ ldr r1,[r0,#offShadowSpace_svState] @ get self verification state
+ cmp r1,#0 @ check for punt condition
+ beq 1f
+ @ Set up SV single-stepping
+ mov r0, rSELF
+ mov r1, #kSubModeJitSV
+ bl dvmEnableSubMode @ (self, subMode)
+ mov r2,#kJitSelfVerification @ ask for self verification
+ str r2,[rSELF,#offThread_jitState]
+ @ intentional fallthrough
+1: @ exit to interpreter without check
+ EXPORT_PC()
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable]
+ FETCH_INST()
+ GET_INST_OPCODE(ip)
+ GOTO_OPCODE(ip)
+#endif
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ * It will end this interpreter activation, and return to the caller
+ * of dvmMterpStdRun.
+ *
+ * State registers will be saved to the "thread" area before bailing, for
+ * debugging purposes.
+ */
+common_gotoBail:
+ SAVE_PC_FP_TO_SELF() @ export state to "thread"
+ mov r0, rSELF @ r0<- self ptr
+ b dvmMterpStdBail @ call(self, changeInterp)
+
+/*
+ * The JIT's invoke method needs to remember the callsite class and
+ * target pair. Save them here so that they are available to
+ * dvmCheckJit following the interpretation of this invoke.
+ */
+#if defined(WITH_JIT)
+save_callsiteinfo:
+ cmp r9, #0
+ ldrne r9, [r9, #offObject_clazz]
+ str r0, [rSELF, #offThread_methodToCall]
+ str r9, [rSELF, #offThread_callsiteClass]
+ bx lr
+#endif
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", r9 is "this"
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+#if defined(WITH_JIT)
+ ldrh r1, [rSELF, #offThread_subMode]
+ ands r1, #kSubModeJitTraceBuild
+ blne save_callsiteinfo
+#endif
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ beq .LinvokeArgsDone @ if no args, skip the rest
+ FETCH(r1, 2) @ r1<- CCCC
+
+.LinvokeRangeArgs:
+ @ r0=methodToCall, r1=CCCC, r2=count, r10=outs
+ @ (very few methods have > 10 args; could unroll for common cases)
+ add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC]
+ sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args
+1: ldr r1, [r3], #4 @ val = *fp++
+ subs r2, r2, #1 @ count--
+ str r1, [r10], #4 @ *outs++ = val
+ bne 1b @ ...while count != 0
+ b .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", r9 is "this"
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+#if defined(WITH_JIT)
+ ldrh r1, [rSELF, #offThread_subMode]
+ ands r1, #kSubModeJitTraceBuild
+ blne save_callsiteinfo
+#endif
+ @ prepare to copy args to "outs" area of current frame
+ movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero
+ SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area
+ FETCH(r1, 2) @ r1<- GFED (load here to hide latency)
+ beq .LinvokeArgsDone
+
+ @ r0=methodToCall, r1=GFED, r2=count, r10=outs
+.LinvokeNonRange:
+ rsb r2, r2, #5 @ r2<- 5-r2
+ add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each
+ bl common_abort @ (skipped due to ARM prefetch)
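+ @ Illustrative note (not in the original source): reading pc in the add
+ @ above yields the address of the current instruction plus 8, i.e. the
+ @ address of label 5 below, so the bl is never executed. Each numbered
+ @ label is four instructions (16 bytes), so adding (5 - count) * 16 lands
+ @ execution on the label that matches the argument count; e.g. count = 3
+ @ starts at label 3 and copies only vF, vE and vD.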
+5: and ip, rINST, #0x0f00 @ isolate A
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2)
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vA
+4: and ip, r1, #0xf000 @ isolate G
+ ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2)
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vG
+3: and ip, r1, #0x0f00 @ isolate F
+ ldr r2, [rFP, ip, lsr #6] @ r2<- vF
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vF
+2: and ip, r1, #0x00f0 @ isolate E
+ ldr r2, [rFP, ip, lsr #2] @ r2<- vE
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vE
+1: and ip, r1, #0x000f @ isolate D
+ ldr r2, [rFP, ip, lsl #2] @ r2<- vD
+ mov r0, r0 @ nop
+ str r2, [r10, #-4]! @ *--outs = vD
+0: @ fall through to .LinvokeArgsDone
+
+.LinvokeArgsDone: @ r0=methodToCall
+ ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize
+ ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize
+ ldr r2, [r0, #offMethod_insns] @ r2<- method->insns
+ ldr rINST, [r0, #offMethod_clazz] @ rINST<- method->clazz
+ @ find space for the new stack frame, check for overflow
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- stack save area
+ sub r1, r1, r9, lsl #2 @ r1<- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(r10, r1) @ r10<- newSaveArea
+@ bl common_dumpRegs
+ ldr r9, [rSELF, #offThread_interpStackEnd] @ r9<- interpStackEnd
+ sub r3, r10, r3, lsl #2 @ r3<- bottom (newsave - outsSize)
+ cmp r3, r9 @ bottom < interpStackEnd?
+ ldrh lr, [rSELF, #offThread_subMode]
+ ldr r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
+ blo .LstackOverflow @ yes, this frame will overflow stack
+
+ @ set up newSaveArea
+#ifdef EASY_GDB
+ SAVEAREA_FROM_FP(ip, rFP) @ ip<- stack save area
+ str ip, [r10, #offStackSaveArea_prevSave]
+#endif
+ str rFP, [r10, #offStackSaveArea_prevFrame]
+ str rPC, [r10, #offStackSaveArea_savedPc]
+#if defined(WITH_JIT)
+ mov r9, #0
+ str r9, [r10, #offStackSaveArea_returnAddr]
+#endif
+ str r0, [r10, #offStackSaveArea_method]
+
+ @ Profiling?
+ cmp lr, #0 @ any special modes happening?
+ bne 2f @ go if so
+1:
+ tst r3, #ACC_NATIVE
+ bne .LinvokeNative
+
+ /*
+ stmfd sp!, {r0-r3}
+ bl common_printNewline
+ mov r0, rFP
+ mov r1, #0
+ bl dvmDumpFp
+ ldmfd sp!, {r0-r3}
+ stmfd sp!, {r0-r3}
+ mov r0, r1
+ mov r1, r10
+ bl dvmDumpFp
+ bl common_printNewline
+ ldmfd sp!, {r0-r3}
+ */
+
+ ldrh r9, [r2] @ r9 <- load INST from new PC
+ ldr r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
+ mov rPC, r2 @ publish new rPC
+
+ @ Update state values for the new method
+ @ r0=methodToCall, r1=newFp, r3=newMethodClass, r9=newINST
+ str r0, [rSELF, #offThread_method] @ self->method = methodToCall
+ str r3, [rSELF, #offThread_methodClassDex] @ self->methodClassDex = ...
+ mov r2, #1
+ str r2, [rSELF, #offThread_debugIsMethodEntry]
+#if defined(WITH_JIT)
+ ldr r0, [rSELF, #offThread_pJitProfTable]
+ mov rFP, r1 @ fp = newFp
+ GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
+ mov rINST, r9 @ publish new rINST
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ cmp r0,#0
+ bne common_updateProfile
+ GOTO_OPCODE(ip) @ jump to next instruction
+#else
+ mov rFP, r1 @ fp = newFp
+ GET_PREFETCHED_OPCODE(ip, r9) @ extract prefetched opcode from r9
+ mov rINST, r9 @ publish new rINST
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+2:
+ @ Profiling - record method entry. r0: methodToCall
+ stmfd sp!, {r0-r3} @ preserve r0-r3
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ mov r1, r0
+ mov r0, rSELF
+ bl dvmReportInvoke @ (self, method)
+ ldmfd sp!, {r0-r3} @ restore r0-r3
+ b 1b
+
+.LinvokeNative:
+ @ Prep for the native call
+ @ r0=methodToCall, r1=newFp, r10=newSaveArea
+ ldrh lr, [rSELF, #offThread_subMode]
+ ldr r9, [rSELF, #offThread_jniLocal_topCookie]@r9<-thread->localRef->...
+ str r1, [rSELF, #offThread_curFrame] @ curFrame = newFp
+ str r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
+ mov r2, r0 @ r2<- methodToCall
+ mov r0, r1 @ r0<- newFp (points to args)
+ add r1, rSELF, #offThread_retval @ r1<- &retval
+ mov r3, rSELF @ arg3<- self
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ b .Lskip
+ .type dalvik_mterp, %function
+dalvik_mterp:
+ .fnstart
+ MTERP_ENTRY1
+ MTERP_ENTRY2
+.Lskip:
+#endif
+
+ cmp lr, #0 @ any special SubModes active?
+ bne 11f @ go handle them if so
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
+7:
+
+ @ native return; r10=newSaveArea
+ @ equivalent to dvmPopJniLocals
+ ldr r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
+ ldr r1, [rSELF, #offThread_exception] @ check for exception
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+ cmp r1, #0 @ null?
+ str r0, [rSELF, #offThread_jniLocal_topCookie] @ new top <- old top
+ bne common_exceptionThrown @ no, handle exception
+
+ FETCH_ADVANCE_INST(3) @ advance rPC, load rINST
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+11:
+ @ r0=newFp, r1=&retval, r2=methodToCall, r3=self, lr=subModes
+ stmfd sp!, {r0-r3} @ save all but subModes
+ mov r0, r2 @ r0<- methodToCall
+ mov r1, rSELF
+ mov r2, rFP
+ bl dvmReportPreNativeInvoke @ (methodToCall, self, fp)
+ ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
+
+ @ Call the native method
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
+
+ @ Restore the pre-call arguments
+ ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
+
+ @ Finish up any post-invoke subMode requirements
+ mov r0, r2 @ r0<- methodToCall
+ mov r1, rSELF
+ mov r2, rFP
+ bl dvmReportPostNativeInvoke @ (methodToCall, self, fp)
+ b 7b @ resume
+
+.LstackOverflow: @ r0=methodToCall
+ mov r1, r0 @ r1<- methodToCall
+ mov r0, rSELF @ r0<- self
+ bl dvmHandleStackOverflow
+ b common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+ .fnend
+ .size dalvik_mterp, .-dalvik_mterp
+#endif
+
+
+ /*
+ * Common code for method invocation, calling through "glue code".
+ *
+ * TODO: now that we have range and non-range invoke handlers, this
+ * needs to be split into two. Maybe just create entry points
+ * that set r9 and jump here?
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ * r9 is "bool methodCallRange", indicating if this is a /range variant
+ */
+ .if 0
+.LinvokeOld:
+ sub sp, sp, #8 @ space for args + pad
+ FETCH(ip, 2) @ ip<- FEDC or CCCC
+ mov r2, r0 @ A2<- methodToCall
+ mov r0, rSELF @ A0<- self
+ SAVE_PC_FP_TO_SELF() @ export state to "self"
+ mov r1, r9 @ A1<- methodCallRange
+ mov r3, rINST, lsr #8 @ A3<- AA
+ str ip, [sp, #0] @ A4<- ip
+ bl dvmMterp_invokeMethod @ call the C invokeMethod
+ add sp, sp, #8 @ remove arg area
+ b common_resumeAfterGlueCall @ continue to next instruction
+ .endif
+
+
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+ ldrh lr, [rSELF, #offThread_subMode]
+ SAVEAREA_FROM_FP(r0, rFP)
+ ldr r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
+ cmp lr, #0 @ any special subMode handling needed?
+ bne 19f
+14:
+ ldr rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
+ ldr r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
+ @ r2<- method we're returning to
+ cmp r2, #0 @ is this a break frame?
+#if defined(WORKAROUND_CORTEX_A9_745320)
+ /* Don't use conditional loads if the HW defect exists */
+ beq 15f
+ ldr r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+15:
+#else
+ ldrne r10, [r2, #offMethod_clazz] @ r10<- method->clazz
+#endif
+ beq common_gotoBail @ break frame, bail out completely
+
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
+ str r2, [rSELF, #offThread_method]@ self->method = newSave->method
+ ldr r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
+ str rFP, [rSELF, #offThread_curFrame] @ curFrame = fp
+#if defined(WITH_JIT)
+ ldr r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
+ mov rPC, r9 @ publish new rPC
+ str r1, [rSELF, #offThread_methodClassDex]
+ str r10, [rSELF, #offThread_inJitCodeCache] @ may return to JIT'ed land
+ cmp r10, #0 @ caller is compiled code
+ blxne r10
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+#else
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ mov rPC, r9 @ publish new rPC
+ str r1, [rSELF, #offThread_methodClassDex]
+ GOTO_OPCODE(ip) @ jump to next instruction
+#endif
+
+19:
+ @ Handle special actions
+ @ On entry, r0: StackSaveArea
+ ldr r1, [r0, #offStackSaveArea_prevFrame] @ r1<- prevFP
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str r1, [rSELF, #offThread_curFrame] @ update interpSave.curFrame
+ mov r0, rSELF
+ bl dvmReportReturn @ (self)
+ SAVEAREA_FROM_FP(r0, rFP) @ restore StackSaveArea
+ b 14b @ continue
+
+ /*
+ * Return handling, calls through "glue code".
+ */
+ .if 0
+.LreturnOld:
+ SAVE_PC_FP_TO_SELF() @ export state
+ mov r0, rSELF @ arg to function
+ bl dvmMterp_returnFromMethod
+ b common_resumeAfterGlueCall
+ .endif
+
+
+/*
+ * Somebody has thrown an exception. Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+ .global dvmMterpCommonExceptionThrown
+dvmMterpCommonExceptionThrown:
+common_exceptionThrown:
+.LexceptionNew:
+
+ EXPORT_PC()
+
+ mov r0, rSELF
+ bl dvmCheckSuspendPending
+
+ ldr r9, [rSELF, #offThread_exception] @ r9<- self->exception
+ mov r1, rSELF @ r1<- self
+ mov r0, r9 @ r0<- exception
+ bl dvmAddTrackedAlloc @ don't let the exception be GCed
+ ldrh r2, [rSELF, #offThread_subMode] @ get subMode flags
+ mov r3, #0 @ r3<- NULL
+ str r3, [rSELF, #offThread_exception] @ self->exception = NULL
+
+ @ Special subMode?
+ cmp r2, #0 @ any special subMode handling needed?
+ bne 7f @ go if so
+8:
+ /* set up args and a local for "&fp" */
+ /* (str sp, [sp, #-4]! would be perfect here, but is discouraged) */
+ str rFP, [sp, #-4]! @ *--sp = fp
+ mov ip, sp @ ip<- &fp
+ mov r3, #0 @ r3<- false
+ str ip, [sp, #-4]! @ *--sp = &fp
+ ldr r1, [rSELF, #offThread_method] @ r1<- self->method
+ mov r0, rSELF @ r0<- self
+ ldr r1, [r1, #offMethod_insns] @ r1<- method->insns
+ ldrh lr, [rSELF, #offThread_subMode] @ lr<- subMode flags
+ mov r2, r9 @ r2<- exception
+ sub r1, rPC, r1 @ r1<- pc - method->insns
+ mov r1, r1, asr #1 @ r1<- offset in code units
+
+ /* call, r0 gets catchRelPc (a code-unit offset) */
+ bl dvmFindCatchBlock @ call(self, relPc, exc, scan?, &fp)
+
+ /* fix earlier stack overflow if necessary; may trash rFP */
+ ldrb r1, [rSELF, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ beq 1f @ no, skip ahead
+ mov rFP, r0 @ save relPc result in rFP
+ mov r0, rSELF @ r0<- self
+ mov r1, r9 @ r1<- exception
+ bl dvmCleanupStackOverflow @ call(self)
+ mov r0, rFP @ restore result
+1:
+
+ /* update frame pointer and check result from dvmFindCatchBlock */
+ ldr rFP, [sp, #4] @ retrieve the updated rFP
+ cmp r0, #0 @ is catchRelPc < 0?
+ add sp, sp, #8 @ restore stack
+ bmi .LnotCaughtLocally
+
+ /* adjust locals to match self->interpSave.curFrame and updated PC */
+ SAVEAREA_FROM_FP(r1, rFP) @ r1<- new save area
+ ldr r1, [r1, #offStackSaveArea_method] @ r1<- new method
+ str r1, [rSELF, #offThread_method] @ self->method = new method
+ ldr r2, [r1, #offMethod_clazz] @ r2<- method->clazz
+ ldr r3, [r1, #offMethod_insns] @ r3<- method->insns
+ ldr r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
+ add rPC, r3, r0, asl #1 @ rPC<- method->insns + catchRelPc
+ str r2, [rSELF, #offThread_methodClassDex] @ self->pDvmDex = meth...
+
+ /* release the tracked alloc on the exception */
+ mov r0, r9 @ r0<- exception
+ mov r1, rSELF @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+
+ /* restore the exception if the handler wants it */
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh rIBASE
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ cmp ip, #OP_MOVE_EXCEPTION @ is it "move-exception"?
+ streq r9, [rSELF, #offThread_exception] @ yes, restore the exception
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+ @ Manage debugger bookkeeping
+7:
+ str rPC, [rSELF, #offThread_pc] @ update interpSave.pc
+ str rFP, [rSELF, #offThread_curFrame] @ update interpSave.curFrame
+ mov r0, rSELF @ arg0<- self
+ mov r1, r9 @ arg1<- exception
+ bl dvmReportExceptionThrow @ (self, exception)
+ b 8b @ resume with normal handling
+
+.LnotCaughtLocally: @ r9=exception
+ /* fix stack overflow if necessary */
+ ldrb r1, [rSELF, #offThread_stackOverflowed]
+ cmp r1, #0 @ did we overflow earlier?
+ movne r0, rSELF @ if yes: r0<- self
+ movne r1, r9 @ if yes: r1<- exception
+ blne dvmCleanupStackOverflow @ if yes: call(self)
+
+ @ may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+ /* call __android_log_print(prio, tag, format, ...) */
+ /* "Exception %s from %s:%d not caught locally" */
+ @ dvmLineNumFromPC(method, pc - method->insns)
+ ldr r0, [rSELF, #offThread_method]
+ ldr r1, [r0, #offMethod_insns]
+ sub r1, rPC, r1
+ asr r1, r1, #1
+ bl dvmLineNumFromPC
+ str r0, [sp, #-4]!
+ @ dvmGetMethodSourceFile(method)
+ ldr r0, [rSELF, #offThread_method]
+ bl dvmGetMethodSourceFile
+ str r0, [sp, #-4]!
+ @ exception->clazz->descriptor
+ ldr r3, [r9, #offObject_clazz]
+ ldr r3, [r3, #offClassObject_descriptor]
+ @
+ ldr r2, strExceptionNotCaughtLocally
+ ldr r1, strLogTag
+ mov r0, #3 @ LOG_DEBUG
+ bl __android_log_print
+#endif
+ str r9, [rSELF, #offThread_exception] @ restore exception
+ mov r0, r9 @ r0<- exception
+ mov r1, rSELF @ r1<- self
+ bl dvmReleaseTrackedAlloc @ release the exception
+ b common_gotoBail @ bail out
+
+
+ /*
+ * Exception handling, calls through "glue code".
+ */
+ .if 0
+.LexceptionOld:
+ SAVE_PC_FP_TO_SELF() @ export state
+ mov r0, rSELF @ arg to function
+ bl dvmMterp_exceptionThrown
+ b common_resumeAfterGlueCall
+ .endif
+
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including the current
+ * instruction.
+ *
+ * On entry:
+ * r10: &dvmDex->pResFields[field]
+ * r0: field pointer (must preserve)
+ */
+common_verifyField:
+ ldrh r3, [rSELF, #offThread_subMode] @ r3 <- submode byte
+ ands r3, #kSubModeJitTraceBuild
+ bxeq lr @ Not building trace, continue
+ ldr r1, [r10] @ r1<- reload resolved StaticField ptr
+ cmp r1, #0 @ resolution complete?
+ bxne lr @ yes, continue
+ stmfd sp!, {r0-r2,lr} @ save regs
+ mov r0, rSELF
+ mov r1, rPC
+ bl dvmJitEndTraceSelect @ (self,pc) end trace before this inst
+ ldmfd sp!, {r0-r2, lr}
+ bx lr @ return
+#endif
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+ LOAD_PC_FP_FROM_SELF() @ pull rPC and rFP out of thread
+ ldr rIBASE, [rSELF, #offThread_curHandlerTable] @ refresh
+ FETCH_INST() @ load rINST from rPC
+ GET_INST_OPCODE(ip) @ extract opcode from rINST
+ GOTO_OPCODE(ip) @ jump to next instruction
+
+/*
+ * Invalid array index. Note that our calling convention is strange; we use r1
+ * and r3 because those just happen to be the registers all our callers are
+ * using. We move r3 before calling the C function, but r1 happens to match.
+ * r1: index
+ * r3: size
+ */
+common_errArrayIndex:
+ EXPORT_PC()
+ mov r0, r3
+ bl dvmThrowArrayIndexOutOfBoundsException
+ b common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+ ldr r0, strDivideByZero
+ bl dvmThrowArithmeticException
+ b common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ * On entry: length in r1
+ */
+common_errNegativeArraySize:
+ EXPORT_PC()
+ mov r0, r1 @ arg0 <- len
+ bl dvmThrowNegativeArraySizeException @ (len)
+ b common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ * On entry: method name in r1
+ */
+common_errNoSuchMethod:
+ EXPORT_PC()
+ mov r0, r1
+ bl dvmThrowNoSuchMethodError
+ b common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one. We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+ EXPORT_PC()
+ mov r0, #0
+ bl dvmThrowNullPointerException
+ b common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault. The source address will
+ * be in lr (use a bl instruction to jump here).
+ */
+common_abort:
+ ldr pc, .LdeadFood
+.LdeadFood:
+ .word 0xdeadf00d
+
+/*
+ * Spit out a "we were here", preserving all registers. (The attempt
+ * to save ip won't work, but we need to save an even number of
+ * registers for EABI 64-bit stack alignment.)
+ */
+ .macro SQUEAK num
+common_squeak\num:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strSqueak
+ mov r1, #\num
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endm
+
+ SQUEAK 0
+ SQUEAK 1
+ SQUEAK 2
+ SQUEAK 3
+ SQUEAK 4
+ SQUEAK 5
+
+/*
+ * Spit out the number in r0, preserving registers.
+ */
+common_printNum:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strSqueak
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ ldr r0, strNewline
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+ /*
+ * Print the 32-bit quantity in r0 as a hex value, preserving registers.
+ */
+common_printHex:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r1, r0
+ ldr r0, strPrintHex
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print the 64-bit quantity in r0-r1, preserving registers.
+ */
+common_printLong:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ mov r3, r1
+ mov r2, r0
+ ldr r0, strPrintLong
+ bl printf
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Print full method info. Pass the Method* in r0. Preserves regs.
+ */
+common_printMethod:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpPrintMethod
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info. Requires the C function to be compiled in.
+ */
+ .if 0
+common_dumpRegs:
+ stmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bl dvmMterpDumpArmRegs
+ ldmfd sp!, {r0, r1, r2, r3, ip, lr}
+ bx lr
+ .endif
+
+#if 0
+/*
+ * Experiment on VFP mode.
+ *
+ * uint32_t setFPSCR(uint32_t val, uint32_t mask)
+ *
+ * Updates the bits specified by "mask", setting them to the values in "val".
+ */
+setFPSCR:
+ and r0, r0, r1 @ make sure no stray bits are set
+ fmrx r2, fpscr @ get VFP reg
+ mvn r1, r1 @ bit-invert mask
+ and r2, r2, r1 @ clear masked bits
+ orr r2, r2, r0 @ set specified bits
+ fmxr fpscr, r2 @ set VFP reg
+ mov r0, r2 @ return new value
+ bx lr
+
+ .align 2
+ .global dvmConfigureFP
+ .type dvmConfigureFP, %function
+dvmConfigureFP:
+ stmfd sp!, {ip, lr}
+ /* 0x03000000 sets DN/FZ */
+ /* 0x00009f00 clears the six exception enable flags */
+ bl common_squeak0
+ mov r0, #0x03000000 @ r0<- 0x03000000
+ add r1, r0, #0x9f00 @ r1<- 0x03009f00
+ bl setFPSCR
+ ldmfd sp!, {ip, pc}
+#endif
+
+
+/*
+ * String references, must be close to the code that uses them.
+ */
+ .align 2
+strDivideByZero:
+ .word .LstrDivideByZero
+strLogTag:
+ .word .LstrLogTag
+strExceptionNotCaughtLocally:
+ .word .LstrExceptionNotCaughtLocally
+
+strNewline:
+ .word .LstrNewline
+strSqueak:
+ .word .LstrSqueak
+strPrintHex:
+ .word .LstrPrintHex
+strPrintLong:
+ .word .LstrPrintLong
+
+/*
+ * Zero-terminated ASCII string data.
+ *
+ * On ARM we have two choices: do like gcc does, and LDR from a .word
+ * with the address, or use an ADR pseudo-op to get the address
+ * directly. ADR saves 4 bytes and an indirection, but it's using a
+ * PC-relative addressing mode and hence has a limited range, which
+ * makes it not work well with mergeable string sections.
+ */
+ .section .rodata.str1.4,"aMS",%progbits,1
+
+.LstrBadEntryPoint:
+ .asciz "Bad entry point %d\n"
+.LstrFilledNewArrayNotImpl:
+ .asciz "filled-new-array only implemented for objects and 'int'"
+.LstrDivideByZero:
+ .asciz "divide by zero"
+.LstrLogTag:
+ .asciz "mterp"
+.LstrExceptionNotCaughtLocally:
+ .asciz "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+ .asciz "\n"
+.LstrSqueak:
+ .asciz "<%d>"
+.LstrPrintHex:
+ .asciz "<%#x>"
+.LstrPrintLong:
+ .asciz "<%lld>"
+
diff --git a/vm/mterp/out/InterpAsm-armv7-a-neon.S b/vm/mterp/out/InterpAsm-armv7-a-neon.S
index 2bed3ef39..de1963238 100644
--- a/vm/mterp/out/InterpAsm-armv7-a-neon.S
+++ b/vm/mterp/out/InterpAsm-armv7-a-neon.S
@@ -16235,8 +16235,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -16262,8 +16262,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpAsm-armv7-a.S b/vm/mterp/out/InterpAsm-armv7-a.S
index c9ebb1d7b..78032dbb5 100644
--- a/vm/mterp/out/InterpAsm-armv7-a.S
+++ b/vm/mterp/out/InterpAsm-armv7-a.S
@@ -16235,8 +16235,8 @@ dalvik_mterp:
cmp lr, #0 @ any special SubModes active?
bne 11f @ go handle them if so
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
7:
@ native return; r10=newSaveArea
@@ -16262,8 +16262,8 @@ dalvik_mterp:
ldmfd sp, {r0-r3} @ refresh. NOTE: no sp autoincrement
@ Call the native method
- mov lr, pc @ set return addr
- ldr pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ ldr ip, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
+ blx ip
@ Restore the pre-call arguments
ldmfd sp!, {r0-r3} @ r2<- methodToCall (others unneeded)
diff --git a/vm/mterp/out/InterpC-armv6-vfp.cpp b/vm/mterp/out/InterpC-armv6-vfp.cpp
new file mode 100644
index 000000000..a5b542cab
--- /dev/null
+++ b/vm/mterp/out/InterpC-armv6-vfp.cpp
@@ -0,0 +1,1249 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv6-vfp'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.cpp */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+#include "mterp/common/FindInterface.h"
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ */
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia (slow!) */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types. We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
+ *
+ * There are two common approaches:
+ * (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ * (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other. For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call. The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy(). The current gcc for ARM seems to do
+ * better with the union.
+ */
+#if defined(__ARM_EABI__)
+# define NO_UNALIGN_64__UNION
+#endif
+
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Export another copy of the PC on every instruction; this is largely
+ * redundant with EXPORT_PC and the debugger code. This value can be
+ * compared against what we have stored on the stack with EXPORT_PC to
+ * help ensure that we aren't missing any export calls.
+ */
+#if WITH_EXTRA_GC_CHECKS > 1
+# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
+#else
+# define EXPORT_EXTRA_PC()
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* deref only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ ALOGE("Invalid branch %d at 0x%04x in %s.%s %s", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ EXPORT_EXTRA_PC(); \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) do { \
+ pc += _offset; \
+ EXPORT_EXTRA_PC(); \
+ } while (false)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ ALOG(_level, LOG_TAG"i", "%-2d|%04x%s", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ ALOG(_level, LOG_TAG"i", "%-2d|####%s", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#else
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#else
+ memcpy(&ptr[idx], &val, 8);
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#else
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#else
+ memcpy(&ptr[idx], &dval, 8);
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ (void)putLongToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ (void)putDoubleToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
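+
+/*
+ * Illustrative note (not part of the generated file): wide (long/double)
+ * values occupy a register pair vN/vN+1, which is why the checked variants
+ * above test "_idx < registersSize-1" while the 32-bit accessors test
+ * "_idx < registersSize".
+ */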
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Replace the opcode (used when handling breakpoints). _opcode is a u1.
+ */
+#define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | _opcode)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
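+
+/*
+ * Illustrative example (not part of the generated file): for the 16-bit
+ * instruction word 0x3210,
+ *   INST_INST(0x3210) == 0x10   (opcode byte)
+ *   INST_A(0x3210)    == 0x2    (low nibble of the high byte)
+ *   INST_B(0x3210)    == 0x3    (high nibble)
+ *   INST_AA(0x3210)   == 0x32   (whole high byte)
+ */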
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly. If we don't do this,
+ * the offset within the current method won't be shown correctly. See the
+ * notes in Exception.c.
+ *
+ * This is also used to determine the address for precise GC.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowNullPointerException(NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsHeapAddress(obj)) {
+ ALOGE("Invalid object %p", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowNullPointerException(NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsHeapAddress(obj)) {
+ ALOGE("Invalid object %p", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/* File: cstubs/stubdefs.cpp */
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...) \
+ extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
+
+/* (void)xxx to quiet unused variable compiler warnings. */
+#define GOTO_TARGET(_target, ...) \
+ void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ const Method* methodToCall; \
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+/*
+ * Redefine what used to be local variable accesses into Thread struct
+ * references. (These are undefined down in "footer.cpp".)
+ */
+#define retval self->interpSave.retval
+#define pc self->interpSave.pc
+#define fp self->interpSave.curFrame
+#define curMethod self->interpSave.method
+#define methodClassDex self->interpSave.methodClassDex
+#define debugTrackedRefStart self->interpSave.debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
+#if defined(WITH_JIT)
+#define JIT_STUB_HACK(x) x
+#else
+#define JIT_STUB_HACK(x)
+#endif
+
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
+#define PC_TO_SELF()
+
+/*
+ * Opcode handler framing macros. Here, each opcode is a separate function
+ * that takes a "self" argument and returns void. We can't declare
+ * these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
+ */
+#define HANDLE_OPCODE(_op) \
+ extern "C" void dvmMterp_##_op(Thread* self); \
+ void dvmMterp_##_op(Thread* self) { \
+ u4 ref; \
+ u2 vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
+
+#define OP_END }
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done. Further, debugger/profiler checks are handled
+ * before handler execution in mterp, so we don't do them here either.
+ */
+#if defined(WITH_JIT)
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { \
+ dvmCheckJit(pc, self); \
+ } \
+ return; \
+ }
+#else
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ return; \
+ }
+#endif
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements. Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown() \
+ do { \
+ dvmMterp_exceptionThrown(self); \
+ return; \
+ } while(false)
+
+#define GOTO_returnFromMethod() \
+ do { \
+ dvmMterp_returnFromMethod(self); \
+ return; \
+ } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ dvmMterp_##_target(self, _methodCallRange); \
+ return; \
+ } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
+ do { \
+ dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall, \
+ _vsrc1, _vdst); \
+ return; \
+ } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp.
+ */
+#define GOTO_bail() \
+ dvmMterpStdBail(self, false);
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started.
+ */
+#define PERIODIC_CHECKS(_pcadj) { \
+ if (dvmCheckSuspendQuick(self)) { \
+ EXPORT_PC(); /* need for precise GC */ \
+ dvmCheckSuspendPending(self); \
+ } \
+ }
+
+/* File: c/opcommon.cpp */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text subst.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \
+ intMax = ~intMin; \
+ result = (_tovtype) val; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
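+
+/*
+ * Illustrative example (not part of the generated file): for float -> int
+ * the clamping above gives
+ *   NaN       -> 0
+ *   +infinity -> 0x7fffffff (intMax)
+ *   -infinity -> 0x80000000 (intMin)
+ *   2.9f      -> 2          (plain C truncation toward zero)
+ */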
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
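+
+/*
+ * Usage note (illustrative, not part of the generated file): the cmpl-*
+ * opcodes instantiate this macro with _nanVal = -1 and the cmpg-* opcodes
+ * with _nanVal = 1, so the two families differ only in the NaN case.
+ */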
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
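+
+/*
+ * Illustrative note (not part of the generated file): the 0x80000000 / -1
+ * check above pins the one overflowing case of 32-bit division, so
+ *   div-int of 0x80000000 by -1  ->  0x80000000 (unchanged)
+ *   rem-int of 0x80000000 by -1  ->  0
+ * rather than relying on behavior that C leaves undefined for INT_MIN / -1.
+ */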
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
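+
+/*
+ * Illustrative note (not part of the generated file): shift distances are
+ * masked to the low 6 bits (0x3f) for long operands here, versus the low
+ * 5 bits (0x1f) in the 32-bit shift handlers above, matching Dalvik's
+ * shl/shr/ushr semantics.
+ */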
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowArrayIndexOutOfBoundsException( \
+ arrayObj->length, GET_REGISTER(vsrc2)); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=%#x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowArrayIndexOutOfBoundsException( \
+ arrayObj->length, GET_REGISTER(vsrc2)); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
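To make the interleaving hazard described above concrete, here is a minimal stand-alone C sketch (illustrative only, not part of the patch; it assumes a little-endian layout to reproduce the 0xffff0001 result):

    #include <stdint.h>
    #include <stdio.h>

    /* One instance-field slot viewed two ways, as the opcodes above do. */
    union FieldSlot {
        int32_t quick;    /* the -quick opcodes read/write all 32 bits */
        int16_t asShort;  /* iput-short/iget-short touch only the low 16 bits */
    };

    int main(void) {
        union FieldSlot foo = { 0 };
        foo.quick = -1;      /* "iput-quick foo" after short foo = -1 -> 0xffffffff */
        foo.asShort = 1;     /* "iput-short" after short bar = 1      -> low 16 bits only */
        printf("0x%08x\n", (uint32_t)foo.quick);  /* "iget-quick foo" -> 0xffff0001 */
        return 0;
    }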
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+/*
+ * The JIT needs dvmDexGetResolvedField() to return non-null.
+ * Because the portable interpreter is not involved with the JIT
+ * and trace building, we only need the extra check here when this
+ * code is massaged into a stub called from an assembly interpreter.
+ * This is controlled by the JIT_STUB_HACK macro.
+ */
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+/* File: cstubs/enddefs.cpp */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
+/* File: armv5te/debug.cpp */
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose ARM registers, along with some other info.
+ *
+ * This function MUST be compiled in ARM mode -- THUMB will yield bogus
+ * results.
+ *
+ * This will NOT preserve r0-r3/ip.
+ */
+void dvmMterpDumpArmRegs(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
+{
+ // TODO: Clang does not support asm declaration syntax.
+#ifndef __clang__
+ register uint32_t rPC asm("r4");
+ register uint32_t rFP asm("r5");
+ register uint32_t rSELF asm("r6");
+ register uint32_t rINST asm("r7");
+ register uint32_t rIBASE asm("r8");
+ register uint32_t r9 asm("r9");
+ register uint32_t r10 asm("r10");
+
+ //extern char dvmAsmInstructionStart[];
+
+ printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
+ printf(" : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
+ rPC, rFP, rSELF, rINST);
+ printf(" : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);
+#endif
+
+ //Thread* self = (Thread*) rSELF;
+ //const Method* method = self->method;
+ printf(" + self is %p\n", dvmThreadSelf());
+ //printf(" + currently in %s.%s %s\n",
+ // method->clazz->descriptor, method->name, method->shorty);
+ //printf(" + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+ //printf(" + next handler for 0x%02x = %p\n",
+ // rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+ StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+ printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+ printf(" prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+ saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc);
+#else
+ printf(" prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+ saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc,
+ *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+ /*
+ * It is a direct (non-virtual) method if it is static, private,
+ * or a constructor.
+ */
+ bool isDirect =
+ ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+ (method->name[0] == '<');
+
+ char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+ printf("<%c:%s.%s %s> ",
+ isDirect ? 'D' : 'V',
+ method->clazz->descriptor,
+ method->name,
+ desc);
+
+ free(desc);
+}
+
diff --git a/vm/mterp/out/InterpC-armv6j.cpp b/vm/mterp/out/InterpC-armv6j.cpp
new file mode 100644
index 000000000..b9c203516
--- /dev/null
+++ b/vm/mterp/out/InterpC-armv6j.cpp
@@ -0,0 +1,1249 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'armv6j'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.cpp */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+#include "mterp/common/FindInterface.h"
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ */
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia (slow!) */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types. We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
+ *
+ * There are two common approaches:
+ * (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ * (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other. For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call. The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy(). The current gcc for ARM seems to do
+ * better with the union.
+ */
+#if defined(__ARM_EABI__)
+# define NO_UNALIGN_64__UNION
+#endif
+
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Export another copy of the PC on every instruction; this is largely
+ * redundant with EXPORT_PC and the debugger code. This value can be
+ * compared against what we have stored on the stack with EXPORT_PC to
+ * help ensure that we aren't missing any export calls.
+ */
+#if WITH_EXTRA_GC_CHECKS > 1
+# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
+#else
+# define EXPORT_EXTRA_PC()
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we don't want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* deref only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ ALOGE("Invalid branch %d at 0x%04x in %s.%s %s", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ EXPORT_EXTRA_PC(); \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) do { \
+ pc += _offset; \
+ EXPORT_EXTRA_PC(); \
+ } while (false)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ ALOG(_level, LOG_TAG"i", "%-2d|%04x%s", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ ALOG(_level, LOG_TAG"i", "%-2d|####%s", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#else
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#else
+ memcpy(&ptr[idx], &val, 8);
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#else
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#else
+ memcpy(&ptr[idx], &dval, 8);
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ (void)putLongToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ (void)putDoubleToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Replace the opcode (used when handling breakpoints). _opcode is a u1.
+ */
+#define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | _opcode)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
+
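As a quick sanity check of the extractors above, a tiny stand-alone sketch (illustrative only, not part of the patch) decodes one instruction word by hand:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint16_t inst = 0x3b21;                /* low byte is the opcode */
        assert((inst & 0xff) == 0x21);         /* INST_INST: opcode          */
        assert(((inst >> 8) & 0x0f) == 0x0b);  /* INST_A:  "vA",  bits 8-11  */
        assert((inst >> 12) == 0x03);          /* INST_B:  "vB",  bits 12-15 */
        assert((inst >> 8) == 0x3b);           /* INST_AA: "vAA", bits 8-15  */
        return 0;
    }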
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly. If we don't do this,
+ * the offset within the current method won't be shown correctly. See the
+ * notes in Exception.c.
+ *
+ * This is also used to determine the address for precise GC.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowNullPointerException(NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsHeapAddress(obj)) {
+ ALOGE("Invalid object %p", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowNullPointerException(NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsHeapAddress(obj)) {
+ ALOGE("Invalid object %p", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ ALOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/* File: cstubs/stubdefs.cpp */
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...) \
+ extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
+
+/* (void)xxx to quiet unused variable compiler warnings. */
+#define GOTO_TARGET(_target, ...) \
+ void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ const Method* methodToCall; \
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+/*
+ * Redefine what used to be local variable accesses into Thread struct
+ * references. (These are undefined down in "footer.cpp".)
+ */
+#define retval self->interpSave.retval
+#define pc self->interpSave.pc
+#define fp self->interpSave.curFrame
+#define curMethod self->interpSave.method
+#define methodClassDex self->interpSave.methodClassDex
+#define debugTrackedRefStart self->interpSave.debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
+#if defined(WITH_JIT)
+#define JIT_STUB_HACK(x) x
+#else
+#define JIT_STUB_HACK(x)
+#endif
+
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
+#define PC_TO_SELF()
+
+/*
+ * Opcode handler framing macros. Here, each opcode is a separate function
+ * that takes a "self" argument and returns void. We can't declare
+ * these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
+ */
+#define HANDLE_OPCODE(_op) \
+ extern "C" void dvmMterp_##_op(Thread* self); \
+ void dvmMterp_##_op(Thread* self) { \
+ u4 ref; \
+ u2 vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
+
+#define OP_END }
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done. Further, debugger/profiler checks are handled
+ * before handler execution in mterp, so we don't do them here either.
+ */
+#if defined(WITH_JIT)
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { \
+ dvmCheckJit(pc, self); \
+ } \
+ return; \
+ }
+#else
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ return; \
+ }
+#endif
+
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements. Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown() \
+ do { \
+ dvmMterp_exceptionThrown(self); \
+ return; \
+ } while(false)
+
+#define GOTO_returnFromMethod() \
+ do { \
+ dvmMterp_returnFromMethod(self); \
+ return; \
+ } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange) \
+ do { \
+ dvmMterp_##_target(self, _methodCallRange); \
+ return; \
+ } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
+ do { \
+ dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall, \
+ _vsrc1, _vdst); \
+ return; \
+ } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp.
+ */
+#define GOTO_bail() \
+ dvmMterpStdBail(self, false);
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started.
+ */
+#define PERIODIC_CHECKS(_pcadj) { \
+ if (dvmCheckSuspendQuick(self)) { \
+ EXPORT_PC(); /* need for precise GC */ \
+ dvmCheckSuspendPending(self); \
+ } \
+ }
+
+/* File: c/opcommon.cpp */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text subst.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \
+ intMax = ~intMin; \
+ result = (_tovtype) val; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
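The clamping above implements the saturating float/double-to-int rule the comment refers to. A stand-alone restatement of the same rule (illustrative only, not part of the patch):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same conversion rule as HANDLE_FLOAT_TO_INT, written as a plain function. */
    static int32_t floatToInt(float val) {
        if (val >= (float)INT32_MAX) return INT32_MAX;  /* +inf and very large values    */
        if (val <= (float)INT32_MIN) return INT32_MIN;  /* -inf and very negative values */
        if (val != val) return 0;                       /* NaN */
        return (int32_t)val;
    }

    int main(void) {
        /* Prints: 2147483647 -2147483648 0 */
        printf("%d %d %d\n", floatToInt(INFINITY), floatToInt(-INFINITY), floatToInt(nanf("")));
        return 0;
    }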
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowArrayIndexOutOfBoundsException( \
+ arrayObj->length, GET_REGISTER(vsrc2)); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=%#x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowArrayIndexOutOfBoundsException( \
+ arrayObj->length, GET_REGISTER(vsrc2)); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->field.name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+/*
+ * The JIT needs dvmDexGetResolvedField() to return non-null.
+ * Because the portable interpreter is not involved with the JIT
+ * and trace building, we only need the extra check here when this
+ * code is massaged into a stub called from an assembly interpreter.
+ * This is controlled by the JIT_STUB_HACK macro.
+ */
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->field.name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+/* File: cstubs/enddefs.cpp */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
+/* File: armv5te/debug.cpp */
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose ARM registers, along with some other info.
+ *
+ * This function MUST be compiled in ARM mode -- THUMB will yield bogus
+ * results.
+ *
+ * This will NOT preserve r0-r3/ip.
+ */
+void dvmMterpDumpArmRegs(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3)
+{
+ // TODO: Clang does not support asm declaration syntax.
+#ifndef __clang__
+ register uint32_t rPC asm("r4");
+ register uint32_t rFP asm("r5");
+ register uint32_t rSELF asm("r6");
+ register uint32_t rINST asm("r7");
+ register uint32_t rIBASE asm("r8");
+ register uint32_t r9 asm("r9");
+ register uint32_t r10 asm("r10");
+
+ //extern char dvmAsmInstructionStart[];
+
+ printf("REGS: r0=%08x r1=%08x r2=%08x r3=%08x\n", r0, r1, r2, r3);
+ printf(" : rPC=%08x rFP=%08x rSELF=%08x rINST=%08x\n",
+ rPC, rFP, rSELF, rINST);
+ printf(" : rIBASE=%08x r9=%08x r10=%08x\n", rIBASE, r9, r10);
+#endif
+
+ //Thread* self = (Thread*) rSELF;
+ //const Method* method = self->method;
+ printf(" + self is %p\n", dvmThreadSelf());
+ //printf(" + currently in %s.%s %s\n",
+ // method->clazz->descriptor, method->name, method->shorty);
+ //printf(" + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+ //printf(" + next handler for 0x%02x = %p\n",
+ // rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+ StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+ printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+ printf(" prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+ saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc);
+#else
+ printf(" prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+ saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc,
+ *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+ /*
+ * It is a direct (non-virtual) method if it is static, private,
+ * or a constructor.
+ */
+ bool isDirect =
+ ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+ (method->name[0] == '<');
+
+ char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+ printf("<%c:%s.%s %s> ",
+ isDirect ? 'D' : 'V',
+ method->clazz->descriptor,
+ method->name,
+ desc);
+
+ free(desc);
+}
+
diff --git a/vm/mterp/rebuild.sh b/vm/mterp/rebuild.sh
index 201432432..03e39a099 100755
--- a/vm/mterp/rebuild.sh
+++ b/vm/mterp/rebuild.sh
@@ -20,7 +20,7 @@
#
set -e
-for arch in portable allstubs armv5te armv5te-vfp armv7-a armv7-a-neon x86 x86-atom; do TARGET_ARCH_EXT=$arch make -f Makefile-mterp; done
+for arch in portable allstubs armv5te armv5te-vfp armv6j armv6-vfp armv7-a armv7-a-neon x86 x86-atom; do TARGET_ARCH_EXT=$arch make -f Makefile-mterp; done
# These aren't actually used, so just go ahead and remove them. The correct
# approach is to prevent them from being generated in the first place, but
diff --git a/vm/native/dalvik_system_VMRuntime.cpp b/vm/native/dalvik_system_VMRuntime.cpp
index 72bad2971..53005f662 100644
--- a/vm/native/dalvik_system_VMRuntime.cpp
+++ b/vm/native/dalvik_system_VMRuntime.cpp
@@ -56,6 +56,37 @@ static void Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapUtilization(
}
/*
+ * native int nativeSetTargetHeapIdealFree(int)
+ *
+ * Sets the current IDEAL_FREE, expressed in bytes, and returns the
+ * value now in effect.
+ *
+ * Note that this is NOT static.
+ */
+static void Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapIdealFree(
+ const u4* args, JValue* pResult)
+{
+ dvmSetTargetHeapIdealFree(args[1]);
+
+ RETURN_INT(dvmGetTargetHeapIdealFree());
+}
+
+/*
+ * native int nativeSetTargetHeapConcurrentStart(int)
+ *
+ * Sets the current concurrentStart, expressed in bytes, and returns the
+ * value now in effect.
+ *
+ * Note that this is NOT static.
+ */
+static void Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapConcurrentStart(
+ const u4* args, JValue* pResult)
+{
+ dvmSetTargetHeapConcurrentStart(args[1]);
+
+ RETURN_INT(dvmGetTargetHeapConcurrentStart());
+}
+/*
* public native void startJitCompilation()
*
* Callback function from the framework to indicate that an app has gone
@@ -217,6 +248,10 @@ const DalvikNativeMethod dvm_dalvik_system_VMRuntime[] = {
Dalvik_dalvik_system_VMRuntime_getTargetHeapUtilization },
{ "nativeSetTargetHeapUtilization", "(F)V",
Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapUtilization },
+ { "nativeSetTargetHeapIdealFree", "(I)I",
+ Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapIdealFree },
+ { "nativeSetTargetHeapConcurrentStart", "(I)I",
+ Dalvik_dalvik_system_VMRuntime_nativeSetTargetHeapConcurrentStart },
{ "newNonMovableArray", "(Ljava/lang/Class;I)Ljava/lang/Object;",
Dalvik_dalvik_system_VMRuntime_newNonMovableArray },
{ "properties", "()[Ljava/lang/String;",
diff --git a/vm/native/dalvik_system_Zygote.cpp b/vm/native/dalvik_system_Zygote.cpp
index 87655c65b..f6843746b 100644
--- a/vm/native/dalvik_system_Zygote.cpp
+++ b/vm/native/dalvik_system_Zygote.cpp
@@ -21,8 +21,13 @@
#include "native/InternalNativePriv.h"
#include <signal.h>
+#if (__GNUC__ == 4 && __GNUC_MINOR__ == 7)
+#include <sys/resource.h>
+#endif
#include <sys/types.h>
#include <sys/wait.h>
+#include <sys/mman.h>
+#include <stdio.h>
#include <grp.h>
#include <errno.h>
#include <paths.h>
@@ -358,6 +363,63 @@ static int setCapabilities(int64_t permitted, int64_t effective)
}
/*
+ * Basic KSM Support
+ */
+#ifndef MADV_MERGEABLE
+#define MADV_MERGEABLE 12
+#endif
+
+static inline void pushAnonymousPagesToKSM(void)
+{
+ FILE *fp;
+ char section[100];
+ char perms[5];
+ unsigned long start, end, misc;
+ int ch, offset;
+
+ fp = fopen("/proc/self/maps","r");
+
+ if (fp != NULL) {
+ while (fscanf(fp, "%lx-%lx %4s %lx %lx:%lx %ld",
+ &start, &end, perms, &misc, &misc, &misc, &misc) == 7)
+ {
+ /* Read the section's name, stripping any preceding spaces and
+ truncating it to 99 chars (100-byte buffer including '\0') */
+ section[0] = 0;
+ offset = 0;
+ while(1)
+ {
+ ch = fgetc(fp);
+ if (ch == '\n' || ch == EOF) {
+ break;
+ }
+ if ((offset == 0) && (ch == ' ')) {
+ continue;
+ }
+ if ((offset + 1) < 100) {
+ section[offset]=ch;
+ section[offset+1]=0;
+ offset++;
+ }
+ }
+ /* Now decide whether to scan this section: for now we scan
+ anonymous sections (those with no file name) plus the [stack]
+ and [heap] sections */
+ if (( section[0] == 0) ||
+ (strcmp(section,"[stack]") == 0) ||
+ (strcmp(section,"[heap]") == 0))
+ {
+ /* The section matches; pass it to madvise() */
+ madvise((void*) start, (size_t) end-start, MADV_MERGEABLE);
+ }
+ if (ch == EOF) {
+ break;
+ }
+ }
+ fclose(fp);
+ }
+}
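For reference, the fscanf() pattern above expects the usual /proc/self/maps layout of address range, permissions, offset, device, and inode, followed by an optional name that the character loop collects. A typical matching line (values made up for illustration) looks like:

    b6f2a000-b6f4b000 rw-p 00000000 00:00 0          [heap]

Anonymous mappings simply have an empty name field, which is why the section[0] == 0 test in the check above identifies them.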
+/*
* Utility routine to fork zygote and specialize the child process.
*/
static pid_t forkAndSpecializeCommon(const u4* args, bool isSystemServer)
@@ -482,6 +544,7 @@ static pid_t forkAndSpecializeCommon(const u4* args, bool isSystemServer)
ALOGE("error in post-zygote initialization");
dvmAbort();
}
+ pushAnonymousPagesToKSM();
} else if (pid > 0) {
/* the parent process */
}
diff --git a/vm/oo/Resolve.cpp b/vm/oo/Resolve.cpp
index ab3de5bda..a4890a5fc 100644
--- a/vm/oo/Resolve.cpp
+++ b/vm/oo/Resolve.cpp
@@ -219,11 +219,7 @@ Method* dvmResolveMethod(const ClassObject* referrer, u4 methodIdx,
}
if (resMethod == NULL) {
- std::string msg;
- msg += resClass->descriptor;
- msg += ".";
- msg += name;
- dvmThrowNoSuchMethodError(msg.c_str());
+ dvmThrowNoSuchMethodError(name);
return NULL;
}
@@ -337,14 +333,11 @@ Method* dvmResolveInterfaceMethod(const ClassObject* referrer, u4 methodIdx)
DexProto proto;
dexProtoSetFromMethodId(&proto, pDvmDex->pDexFile, pMethodId);
- LOGVV("+++ looking for '%s' in resClass='%s'", methodName, resClass->descriptor);
+ LOGVV("+++ looking for '%s' '%s' in resClass='%s'",
+ methodName, methodSig, resClass->descriptor);
resMethod = dvmFindInterfaceMethodHier(resClass, methodName, &proto);
if (resMethod == NULL) {
- std::string msg;
- msg += resClass->descriptor;
- msg += ".";
- msg += methodName;
- dvmThrowNoSuchMethodError(msg.c_str());
+ dvmThrowNoSuchMethodError(methodName);
return NULL;
}
diff --git a/vm/reflect/Annotation.cpp b/vm/reflect/Annotation.cpp
index 233db0881..7a1cbca59 100644
--- a/vm/reflect/Annotation.cpp
+++ b/vm/reflect/Annotation.cpp
@@ -598,6 +598,10 @@ static Object* convertReturnType(Object* valueObj, ClassObject* methodReturn)
}
ALOGV("HEY: converting valueObj from [%s to [%s",
srcElemClass->descriptor, dstElemClass->descriptor);
+#ifdef LOG_NDEBUG
+ // variable defined but not used => breakage on -Werror
+ (void)srcElemClass;
+#endif
ArrayObject* srcArray = (ArrayObject*) valueObj;
u4 length = srcArray->length;
@@ -1101,9 +1105,9 @@ static const u1* searchEncodedAnnotation(const ClassObject* clazz,
const u1* ptr, const char* name)
{
DexFile* pDexFile = clazz->pDvmDex->pDexFile;
- u4 typeIdx, size;
+ u4 /*typeIdx,*/ size;
- typeIdx = readUleb128(&ptr);
+ /*typeIdx =*/ readUleb128(&ptr);
size = readUleb128(&ptr);
//printf("##### searching ptr=%p type=%u size=%u\n", ptr, typeIdx, size);