summaryrefslogtreecommitdiffstats
path: root/vm/mterp
diff options
context:
space:
mode:
authorRaghu Gandham <raghu@mips.com>2012-05-02 14:27:16 -0700
committerRaghu Gandham <raghu@mips.com>2012-05-02 14:27:16 -0700
commita8b91c52fd8a90b784835dfe1f8898035266c4dd (patch)
tree8a9bb58ee3b78c10cf88a3bac21b7f96d75cd1f7 /vm/mterp
parenta14639df65cc0aefafcddda5aae8b591204e45f9 (diff)
downloadandroid_dalvik-a8b91c52fd8a90b784835dfe1f8898035266c4dd.tar.gz
android_dalvik-a8b91c52fd8a90b784835dfe1f8898035266c4dd.tar.bz2
android_dalvik-a8b91c52fd8a90b784835dfe1f8898035266c4dd.zip
[MIPS] Dalvik fast interpreter support and JIT implementation
Change-Id: I9bb4f6875b7061d3ffaee73f204026cb8ba3ed39 Signed-off-by: Raghu Gandham <raghu@mips.com> Signed-off-by: Chris Dearman <chris@mips.com> Signed-off-by: Douglas Leung <douglas@mips.com> Signed-off-by: Don Padgett <don@mips.com>
Diffstat (limited to 'vm/mterp')
-rw-r--r--vm/mterp/Mterp.cpp4
-rw-r--r--vm/mterp/c/header.cpp8
-rw-r--r--vm/mterp/common/asm-constants.h2
-rw-r--r--vm/mterp/common/mips-defines.h3
-rw-r--r--vm/mterp/config-mips68
-rwxr-xr-xvm/mterp/gen-mterp.py19
-rw-r--r--vm/mterp/mips/ALT_OP_DISPATCH_FF.S10
-rw-r--r--vm/mterp/mips/OP_ADD_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_ADD_DOUBLE_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_ADD_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_ADD_FLOAT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_ADD_INT.S2
-rw-r--r--vm/mterp/mips/OP_ADD_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_ADD_INT_LIT16.S2
-rw-r--r--vm/mterp/mips/OP_ADD_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_ADD_LONG.S10
-rw-r--r--vm/mterp/mips/OP_ADD_LONG_2ADDR.S5
-rw-r--r--vm/mterp/mips/OP_AGET.S31
-rw-r--r--vm/mterp/mips/OP_AGET_BOOLEAN.S2
-rw-r--r--vm/mterp/mips/OP_AGET_BYTE.S2
-rw-r--r--vm/mterp/mips/OP_AGET_CHAR.S2
-rw-r--r--vm/mterp/mips/OP_AGET_OBJECT.S2
-rw-r--r--vm/mterp/mips/OP_AGET_SHORT.S2
-rw-r--r--vm/mterp/mips/OP_AGET_WIDE.S27
-rw-r--r--vm/mterp/mips/OP_AND_INT.S2
-rw-r--r--vm/mterp/mips/OP_AND_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_AND_INT_LIT16.S2
-rw-r--r--vm/mterp/mips/OP_AND_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_AND_LONG.S2
-rw-r--r--vm/mterp/mips/OP_AND_LONG_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_APUT.S27
-rw-r--r--vm/mterp/mips/OP_APUT_BOOLEAN.S2
-rw-r--r--vm/mterp/mips/OP_APUT_BYTE.S2
-rw-r--r--vm/mterp/mips/OP_APUT_CHAR.S2
-rw-r--r--vm/mterp/mips/OP_APUT_OBJECT.S50
-rw-r--r--vm/mterp/mips/OP_APUT_SHORT.S2
-rw-r--r--vm/mterp/mips/OP_APUT_WIDE.S27
-rw-r--r--vm/mterp/mips/OP_ARRAY_LENGTH.S14
-rw-r--r--vm/mterp/mips/OP_BREAKPOINT.S15
-rw-r--r--vm/mterp/mips/OP_CHECK_CAST.S71
-rw-r--r--vm/mterp/mips/OP_CHECK_CAST_JUMBO.S84
-rw-r--r--vm/mterp/mips/OP_CMPG_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_CMPG_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_CMPL_DOUBLE.S70
-rw-r--r--vm/mterp/mips/OP_CMPL_FLOAT.S82
-rw-r--r--vm/mterp/mips/OP_CMP_LONG.S40
-rw-r--r--vm/mterp/mips/OP_CONST.S11
-rw-r--r--vm/mterp/mips/OP_CONST_16.S8
-rw-r--r--vm/mterp/mips/OP_CONST_4.S10
-rw-r--r--vm/mterp/mips/OP_CONST_CLASS.S31
-rw-r--r--vm/mterp/mips/OP_CONST_CLASS_JUMBO.S34
-rw-r--r--vm/mterp/mips/OP_CONST_HIGH16.S9
-rw-r--r--vm/mterp/mips/OP_CONST_STRING.S33
-rw-r--r--vm/mterp/mips/OP_CONST_STRING_JUMBO.S32
-rw-r--r--vm/mterp/mips/OP_CONST_WIDE.S17
-rw-r--r--vm/mterp/mips/OP_CONST_WIDE_16.S11
-rw-r--r--vm/mterp/mips/OP_CONST_WIDE_32.S14
-rw-r--r--vm/mterp/mips/OP_CONST_WIDE_HIGH16.S12
-rw-r--r--vm/mterp/mips/OP_DISPATCH_FF.S4
-rw-r--r--vm/mterp/mips/OP_DIV_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_DIV_DOUBLE_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_DIV_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_DIV_FLOAT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_DIV_INT.S2
-rw-r--r--vm/mterp/mips/OP_DIV_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_DIV_INT_LIT16.S2
-rw-r--r--vm/mterp/mips/OP_DIV_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_DIV_LONG.S6
-rw-r--r--vm/mterp/mips/OP_DIV_LONG_2ADDR.S6
-rw-r--r--vm/mterp/mips/OP_DOUBLE_TO_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_DOUBLE_TO_INT.S80
-rw-r--r--vm/mterp/mips/OP_DOUBLE_TO_LONG.S76
-rw-r--r--vm/mterp/mips/OP_EXECUTE_INLINE.S104
-rw-r--r--vm/mterp/mips/OP_EXECUTE_INLINE_RANGE.S92
-rw-r--r--vm/mterp/mips/OP_FILLED_NEW_ARRAY.S120
-rw-r--r--vm/mterp/mips/OP_FILLED_NEW_ARRAY_JUMBO.S95
-rw-r--r--vm/mterp/mips/OP_FILLED_NEW_ARRAY_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_FILL_ARRAY_DATA.S16
-rw-r--r--vm/mterp/mips/OP_FLOAT_TO_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_FLOAT_TO_INT.S63
-rw-r--r--vm/mterp/mips/OP_FLOAT_TO_LONG.S65
-rw-r--r--vm/mterp/mips/OP_GOTO.S23
-rw-r--r--vm/mterp/mips/OP_GOTO_16.S21
-rw-r--r--vm/mterp/mips/OP_GOTO_32.S32
-rw-r--r--vm/mterp/mips/OP_IF_EQ.S2
-rw-r--r--vm/mterp/mips/OP_IF_EQZ.S2
-rw-r--r--vm/mterp/mips/OP_IF_GE.S2
-rw-r--r--vm/mterp/mips/OP_IF_GEZ.S2
-rw-r--r--vm/mterp/mips/OP_IF_GT.S2
-rw-r--r--vm/mterp/mips/OP_IF_GTZ.S2
-rw-r--r--vm/mterp/mips/OP_IF_LE.S2
-rw-r--r--vm/mterp/mips/OP_IF_LEZ.S2
-rw-r--r--vm/mterp/mips/OP_IF_LT.S2
-rw-r--r--vm/mterp/mips/OP_IF_LTZ.S2
-rw-r--r--vm/mterp/mips/OP_IF_NE.S2
-rw-r--r--vm/mterp/mips/OP_IF_NEZ.S2
-rw-r--r--vm/mterp/mips/OP_IGET.S49
-rw-r--r--vm/mterp/mips/OP_IGET_BOOLEAN.S2
-rw-r--r--vm/mterp/mips/OP_IGET_BOOLEAN_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IGET_BYTE.S3
-rw-r--r--vm/mterp/mips/OP_IGET_BYTE_JUMBO.S3
-rw-r--r--vm/mterp/mips/OP_IGET_CHAR.S3
-rw-r--r--vm/mterp/mips/OP_IGET_CHAR_JUMBO.S3
-rw-r--r--vm/mterp/mips/OP_IGET_JUMBO.S55
-rw-r--r--vm/mterp/mips/OP_IGET_OBJECT.S2
-rw-r--r--vm/mterp/mips/OP_IGET_OBJECT_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IGET_OBJECT_QUICK.S2
-rw-r--r--vm/mterp/mips/OP_IGET_OBJECT_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IGET_QUICK.S17
-rw-r--r--vm/mterp/mips/OP_IGET_SHORT.S3
-rw-r--r--vm/mterp/mips/OP_IGET_SHORT_JUMBO.S3
-rw-r--r--vm/mterp/mips/OP_IGET_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_IGET_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IGET_WIDE.S49
-rw-r--r--vm/mterp/mips/OP_IGET_WIDE_JUMBO.S57
-rw-r--r--vm/mterp/mips/OP_IGET_WIDE_QUICK.S17
-rw-r--r--vm/mterp/mips/OP_IGET_WIDE_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_IGET_WIDE_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_INSTANCE_OF.S82
-rw-r--r--vm/mterp/mips/OP_INSTANCE_OF_JUMBO.S96
-rw-r--r--vm/mterp/mips/OP_INT_TO_BYTE.S2
-rw-r--r--vm/mterp/mips/OP_INT_TO_CHAR.S2
-rw-r--r--vm/mterp/mips/OP_INT_TO_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_INT_TO_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_INT_TO_LONG.S2
-rw-r--r--vm/mterp/mips/OP_INT_TO_SHORT.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_DIRECT.S42
-rw-r--r--vm/mterp/mips/OP_INVOKE_DIRECT_JUMBO.S43
-rw-r--r--vm/mterp/mips/OP_INVOKE_DIRECT_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_INTERFACE.S28
-rw-r--r--vm/mterp/mips/OP_INVOKE_INTERFACE_JUMBO.S25
-rw-r--r--vm/mterp/mips/OP_INVOKE_INTERFACE_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_OBJECT_INIT_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_OBJECT_INIT_RANGE.S48
-rw-r--r--vm/mterp/mips/OP_INVOKE_STATIC.S54
-rw-r--r--vm/mterp/mips/OP_INVOKE_STATIC_JUMBO.S53
-rw-r--r--vm/mterp/mips/OP_INVOKE_STATIC_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_SUPER.S60
-rw-r--r--vm/mterp/mips/OP_INVOKE_SUPER_JUMBO.S56
-rw-r--r--vm/mterp/mips/OP_INVOKE_SUPER_QUICK.S26
-rw-r--r--vm/mterp/mips/OP_INVOKE_SUPER_QUICK_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_SUPER_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_VIRTUAL.S48
-rw-r--r--vm/mterp/mips/OP_INVOKE_VIRTUAL_JUMBO.S44
-rw-r--r--vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK.S23
-rw-r--r--vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_INVOKE_VIRTUAL_RANGE.S2
-rw-r--r--vm/mterp/mips/OP_IPUT.S50
-rw-r--r--vm/mterp/mips/OP_IPUT_BOOLEAN.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_BOOLEAN_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_BYTE.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_BYTE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_CHAR.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_CHAR_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_JUMBO.S58
-rw-r--r--vm/mterp/mips/OP_IPUT_OBJECT.S56
-rw-r--r--vm/mterp/mips/OP_IPUT_OBJECT_JUMBO.S60
-rw-r--r--vm/mterp/mips/OP_IPUT_OBJECT_QUICK.S21
-rw-r--r--vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_QUICK.S16
-rw-r--r--vm/mterp/mips/OP_IPUT_SHORT.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_SHORT_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_WIDE.S48
-rw-r--r--vm/mterp/mips/OP_IPUT_WIDE_JUMBO.S55
-rw-r--r--vm/mterp/mips/OP_IPUT_WIDE_QUICK.S17
-rw-r--r--vm/mterp/mips/OP_IPUT_WIDE_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_LONG_TO_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_LONG_TO_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_LONG_TO_INT.S10
-rw-r--r--vm/mterp/mips/OP_MONITOR_ENTER.S17
-rw-r--r--vm/mterp/mips/OP_MONITOR_EXIT.S26
-rw-r--r--vm/mterp/mips/OP_MOVE.S10
-rw-r--r--vm/mterp/mips/OP_MOVE_16.S10
-rw-r--r--vm/mterp/mips/OP_MOVE_EXCEPTION.S11
-rw-r--r--vm/mterp/mips/OP_MOVE_FROM16.S10
-rw-r--r--vm/mterp/mips/OP_MOVE_OBJECT.S2
-rw-r--r--vm/mterp/mips/OP_MOVE_OBJECT_16.S2
-rw-r--r--vm/mterp/mips/OP_MOVE_OBJECT_FROM16.S2
-rw-r--r--vm/mterp/mips/OP_MOVE_RESULT.S9
-rw-r--r--vm/mterp/mips/OP_MOVE_RESULT_OBJECT.S2
-rw-r--r--vm/mterp/mips/OP_MOVE_RESULT_WIDE.S11
-rw-r--r--vm/mterp/mips/OP_MOVE_WIDE.S13
-rw-r--r--vm/mterp/mips/OP_MOVE_WIDE_16.S13
-rw-r--r--vm/mterp/mips/OP_MOVE_WIDE_FROM16.S13
-rw-r--r--vm/mterp/mips/OP_MUL_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_MUL_DOUBLE_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_MUL_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_MUL_FLOAT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_MUL_INT.S2
-rw-r--r--vm/mterp/mips/OP_MUL_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_MUL_INT_LIT16.S2
-rw-r--r--vm/mterp/mips/OP_MUL_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_MUL_LONG.S41
-rw-r--r--vm/mterp/mips/OP_MUL_LONG_2ADDR.S28
-rw-r--r--vm/mterp/mips/OP_NEG_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_NEG_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_NEG_INT.S2
-rw-r--r--vm/mterp/mips/OP_NEG_LONG.S3
-rw-r--r--vm/mterp/mips/OP_NEW_ARRAY.S61
-rw-r--r--vm/mterp/mips/OP_NEW_ARRAY_JUMBO.S69
-rw-r--r--vm/mterp/mips/OP_NEW_INSTANCE.S106
-rw-r--r--vm/mterp/mips/OP_NEW_INSTANCE_JUMBO.S108
-rw-r--r--vm/mterp/mips/OP_NOP.S13
-rw-r--r--vm/mterp/mips/OP_NOT_INT.S2
-rw-r--r--vm/mterp/mips/OP_NOT_LONG.S2
-rw-r--r--vm/mterp/mips/OP_OR_INT.S2
-rw-r--r--vm/mterp/mips/OP_OR_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_OR_INT_LIT16.S2
-rw-r--r--vm/mterp/mips/OP_OR_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_OR_LONG.S2
-rw-r--r--vm/mterp/mips/OP_OR_LONG_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_PACKED_SWITCH.S34
-rw-r--r--vm/mterp/mips/OP_REM_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_REM_DOUBLE_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_REM_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_REM_FLOAT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_REM_INT.S2
-rw-r--r--vm/mterp/mips/OP_REM_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_REM_INT_LIT16.S2
-rw-r--r--vm/mterp/mips/OP_REM_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_REM_LONG.S7
-rw-r--r--vm/mterp/mips/OP_REM_LONG_2ADDR.S6
-rw-r--r--vm/mterp/mips/OP_RETURN.S13
-rw-r--r--vm/mterp/mips/OP_RETURN_OBJECT.S2
-rw-r--r--vm/mterp/mips/OP_RETURN_VOID.S3
-rw-r--r--vm/mterp/mips/OP_RETURN_VOID_BARRIER.S3
-rw-r--r--vm/mterp/mips/OP_RETURN_WIDE.S13
-rw-r--r--vm/mterp/mips/OP_RSUB_INT.S3
-rw-r--r--vm/mterp/mips/OP_RSUB_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_SGET.S50
-rw-r--r--vm/mterp/mips/OP_SGET_BOOLEAN.S2
-rw-r--r--vm/mterp/mips/OP_SGET_BOOLEAN_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SGET_BYTE.S2
-rw-r--r--vm/mterp/mips/OP_SGET_BYTE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SGET_CHAR.S2
-rw-r--r--vm/mterp/mips/OP_SGET_CHAR_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SGET_JUMBO.S54
-rw-r--r--vm/mterp/mips/OP_SGET_OBJECT.S2
-rw-r--r--vm/mterp/mips/OP_SGET_OBJECT_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SGET_OBJECT_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SGET_SHORT.S2
-rw-r--r--vm/mterp/mips/OP_SGET_SHORT_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SGET_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_SGET_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SGET_WIDE.S58
-rw-r--r--vm/mterp/mips/OP_SGET_WIDE_JUMBO.S47
-rw-r--r--vm/mterp/mips/OP_SGET_WIDE_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_SGET_WIDE_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SHL_INT.S2
-rw-r--r--vm/mterp/mips/OP_SHL_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_SHL_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_SHL_LONG.S33
-rw-r--r--vm/mterp/mips/OP_SHL_LONG_2ADDR.S28
-rw-r--r--vm/mterp/mips/OP_SHR_INT.S2
-rw-r--r--vm/mterp/mips/OP_SHR_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_SHR_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_SHR_LONG.S33
-rw-r--r--vm/mterp/mips/OP_SHR_LONG_2ADDR.S28
-rw-r--r--vm/mterp/mips/OP_SPARSE_SWITCH.S2
-rw-r--r--vm/mterp/mips/OP_SPUT.S50
-rw-r--r--vm/mterp/mips/OP_SPUT_BOOLEAN.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_BOOLEAN_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_BYTE.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_BYTE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_CHAR.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_CHAR_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_JUMBO.S55
-rw-r--r--vm/mterp/mips/OP_SPUT_OBJECT.S56
-rw-r--r--vm/mterp/mips/OP_SPUT_OBJECT_JUMBO.S58
-rw-r--r--vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_SHORT.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_SHORT_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_WIDE.S58
-rw-r--r--vm/mterp/mips/OP_SPUT_WIDE_JUMBO.S60
-rw-r--r--vm/mterp/mips/OP_SPUT_WIDE_VOLATILE.S2
-rw-r--r--vm/mterp/mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S2
-rw-r--r--vm/mterp/mips/OP_SUB_DOUBLE.S2
-rw-r--r--vm/mterp/mips/OP_SUB_DOUBLE_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_SUB_FLOAT.S2
-rw-r--r--vm/mterp/mips/OP_SUB_FLOAT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_SUB_INT.S2
-rw-r--r--vm/mterp/mips/OP_SUB_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_SUB_LONG.S10
-rw-r--r--vm/mterp/mips/OP_SUB_LONG_2ADDR.S5
-rw-r--r--vm/mterp/mips/OP_THROW.S15
-rw-r--r--vm/mterp/mips/OP_THROW_VERIFICATION_ERROR.S15
-rw-r--r--vm/mterp/mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S17
-rw-r--r--vm/mterp/mips/OP_UNUSED_27FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_28FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_29FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_2AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_2BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_2CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_2DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_2EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_2FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_30FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_31FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_32FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_33FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_34FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_35FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_36FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_37FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_38FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_39FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3E.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3F.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_3FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_40.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_40FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_41.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_41FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_42.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_42FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_43.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_43FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_44FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_45FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_46FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_47FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_48FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_49FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_4AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_4BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_4CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_4DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_4EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_4FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_50FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_51FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_52FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_53FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_54FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_55FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_56FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_57FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_58FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_59FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_5AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_5BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_5CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_5DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_5EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_5FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_60FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_61FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_62FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_63FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_64FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_65FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_66FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_67FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_68FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_69FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_6AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_6BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_6CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_6DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_6EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_6FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_70FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_71FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_72FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_73.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_73FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_74FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_75FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_76FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_77FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_78FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_79.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_79FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_7A.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_7AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_7BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_7CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_7DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_7EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_7FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_80FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_81FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_82FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_83FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_84FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_85FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_86FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_87FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_88FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_89FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_8AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_8BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_8CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_8DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_8EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_8FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_90FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_91FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_92FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_93FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_94FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_95FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_96FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_97FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_98FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_99FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_9AFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_9BFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_9CFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_9DFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_9EFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_9FFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A0FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A1FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A2FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A3FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A4FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A5FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A6FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A7FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A8FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_A9FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_AAFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_ABFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_ACFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_ADFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_AEFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_AFFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B0FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B1FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B2FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B3FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B4FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B5FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B6FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B7FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B8FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_B9FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_BAFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_BBFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_BCFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_BDFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_BEFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_BFFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C0FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C1FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C2FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C3FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C4FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C5FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C6FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C7FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C8FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_C9FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_CAFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_CBFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_CCFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_CDFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_CEFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_CFFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D0FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D1FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D2FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D3FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D4FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D5FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D6FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D7FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D8FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_D9FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_DAFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_DBFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_DCFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_DDFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_DEFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_DFFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E0FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E1FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E2FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E3.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E3FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E4.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E4FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E5.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E5FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E6.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E6FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E7.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E7FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E8.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E8FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E9.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_E9FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EA.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EAFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EB.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EBFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EC.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_ECFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_ED.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EDFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EEFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_EFFF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_F0FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_F1.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_F1FF.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_FC.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_FD.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_FE.S1
-rw-r--r--vm/mterp/mips/OP_UNUSED_FF.S1
-rw-r--r--vm/mterp/mips/OP_USHR_INT.S2
-rw-r--r--vm/mterp/mips/OP_USHR_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_USHR_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_USHR_LONG.S32
-rw-r--r--vm/mterp/mips/OP_USHR_LONG_2ADDR.S27
-rw-r--r--vm/mterp/mips/OP_XOR_INT.S2
-rw-r--r--vm/mterp/mips/OP_XOR_INT_2ADDR.S2
-rw-r--r--vm/mterp/mips/OP_XOR_INT_LIT16.S2
-rw-r--r--vm/mterp/mips/OP_XOR_INT_LIT8.S2
-rw-r--r--vm/mterp/mips/OP_XOR_LONG.S2
-rw-r--r--vm/mterp/mips/OP_XOR_LONG_2ADDR.S2
-rw-r--r--vm/mterp/mips/alt_stub.S20
-rw-r--r--vm/mterp/mips/bincmp.S35
-rw-r--r--vm/mterp/mips/binflop.S44
-rw-r--r--vm/mterp/mips/binflop2addr.S45
-rw-r--r--vm/mterp/mips/binflopWide.S52
-rw-r--r--vm/mterp/mips/binflopWide2addr.S46
-rw-r--r--vm/mterp/mips/binop.S34
-rw-r--r--vm/mterp/mips/binop2addr.S30
-rw-r--r--vm/mterp/mips/binopLit16.S30
-rw-r--r--vm/mterp/mips/binopLit8.S32
-rw-r--r--vm/mterp/mips/binopWide.S38
-rw-r--r--vm/mterp/mips/binopWide2addr.S34
-rw-r--r--vm/mterp/mips/debug.cpp92
-rw-r--r--vm/mterp/mips/entry.S107
-rw-r--r--vm/mterp/mips/footer.S1205
-rw-r--r--vm/mterp/mips/header.S345
-rw-r--r--vm/mterp/mips/platform.S32
-rw-r--r--vm/mterp/mips/stub.S10
-rw-r--r--vm/mterp/mips/unflop.S32
-rw-r--r--vm/mterp/mips/unflopWide.S32
-rw-r--r--vm/mterp/mips/unflopWider.S33
-rw-r--r--vm/mterp/mips/unop.S19
-rw-r--r--vm/mterp/mips/unopNarrower.S37
-rw-r--r--vm/mterp/mips/unopWide.S22
-rw-r--r--vm/mterp/mips/unopWider.S20
-rw-r--r--vm/mterp/mips/unused.S2
-rw-r--r--vm/mterp/mips/zcmp.S33
-rw-r--r--vm/mterp/out/InterpAsm-mips.S29959
-rw-r--r--vm/mterp/out/InterpC-allstubs.cpp8
-rw-r--r--vm/mterp/out/InterpC-armv5te-vfp.cpp8
-rw-r--r--vm/mterp/out/InterpC-armv5te.cpp8
-rw-r--r--vm/mterp/out/InterpC-armv7-a-neon.cpp8
-rw-r--r--vm/mterp/out/InterpC-armv7-a.cpp8
-rw-r--r--vm/mterp/out/InterpC-mips.cpp2423
-rw-r--r--vm/mterp/out/InterpC-portable.cpp8
-rw-r--r--vm/mterp/out/InterpC-x86-atom.cpp8
-rw-r--r--vm/mterp/out/InterpC-x86.cpp8
-rwxr-xr-xvm/mterp/rebuild.sh2
574 files changed, 39998 insertions, 4 deletions
diff --git a/vm/mterp/Mterp.cpp b/vm/mterp/Mterp.cpp
index 6220e81f9..0cd30fd57 100644
--- a/vm/mterp/Mterp.cpp
+++ b/vm/mterp/Mterp.cpp
@@ -52,7 +52,11 @@ bool dvmCheckAsmConstants()
* which one did, but if any one is too big the total size will
* overflow.
*/
+#if defined(__mips__)
+ const int width = 128;
+#else
const int width = 64;
+#endif
int interpSize = (uintptr_t) dvmAsmInstructionEnd -
(uintptr_t) dvmAsmInstructionStart;
if (interpSize != 0 && interpSize != kNumPackedOpcodes*width) {
diff --git a/vm/mterp/c/header.cpp b/vm/mterp/c/header.cpp
index c7e727ebe..278718824 100644
--- a/vm/mterp/c/header.cpp
+++ b/vm/mterp/c/header.cpp
@@ -61,6 +61,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/common/asm-constants.h b/vm/mterp/common/asm-constants.h
index af2275dac..bc6af300b 100644
--- a/vm/mterp/common/asm-constants.h
+++ b/vm/mterp/common/asm-constants.h
@@ -40,7 +40,7 @@
* data structures. Some versions of gcc will hold small enumerated types
* in a char instead of an int.
*/
-#if defined(__ARM_EABI__)
+#if defined(__ARM_EABI__) || defined(__mips__)
# define MTERP_NO_UNALIGN_64
#endif
#if defined(HAVE_SHORT_ENUMS)
diff --git a/vm/mterp/common/mips-defines.h b/vm/mterp/common/mips-defines.h
new file mode 100644
index 000000000..1e11a30c3
--- /dev/null
+++ b/vm/mterp/common/mips-defines.h
@@ -0,0 +1,3 @@
+#define fcc0 $fcc0
+#define fcc1 $fcc1
+
diff --git a/vm/mterp/config-mips b/vm/mterp/config-mips
new file mode 100644
index 000000000..8c5085887
--- /dev/null
+++ b/vm/mterp/config-mips
@@ -0,0 +1,68 @@
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Configuration for MIPS architecture targets.
+#
+
+handler-style computed-goto
+handler-size 128
+# Need to specify split-ops to generate alt-ops at the end after
+# importing other files.
+split-ops
+
+# source for the instruction table stub
+asm-stub mips/stub.S
+
+# source for alternate entry stub
+asm-alt-stub mips/alt_stub.S
+
+# file header and basic definitions
+import c/header.cpp
+import mips/header.S
+
+# C pre-processor defines for stub C instructions
+import cstubs/stubdefs.cpp
+
+# highly-platform-specific defs
+import mips/platform.S
+
+# common defs for the C helpers; include this before the instruction handlers
+import c/opcommon.cpp
+
+# arch-specific entry point to interpreter
+import mips/entry.S
+
+# opcode list; argument to op-start is default directory
+op-start mips
+
+# OP_BREAKPOINT needs explicit testing
+ op OP_BREAKPOINT c
+
+# OP_DISPATCH_FF needs explicit testing
+ op OP_DISPATCH_FF c
+
+op-end
+
+# "helper" code for C; include if you use any of the C stubs (this generates
+# object code, so it's normally excluded)
+import c/gotoTargets.cpp
+
+# end of defs; include this when cstubs/stubdefs.cpp is included
+import cstubs/enddefs.cpp
+
+# common subroutines for asm
+import mips/footer.S
+import mips/debug.cpp
+alt-ops
diff --git a/vm/mterp/gen-mterp.py b/vm/mterp/gen-mterp.py
index e0b1b2df2..ec7acaf85 100755
--- a/vm/mterp/gen-mterp.py
+++ b/vm/mterp/gen-mterp.py
@@ -25,6 +25,7 @@ from string import Template
interp_defs_file = "../../libdex/DexOpcodes.h" # need opcode list
kNumPackedOpcodes = 512 # TODO: Derive this from DexOpcodes.h.
+splitops = False
verbose = False
handler_size_bits = -1000
handler_size_bytes = -1000
@@ -217,7 +218,18 @@ def opEnd(tokens):
in_op_start = 2
loadAndEmitOpcodes()
-
+ if splitops == False:
+ if generate_alt_table:
+ loadAndEmitAltOpcodes()
+ if style == "jump-table":
+ emitJmpTable("dvmAsmInstructionStart", label_prefix);
+ emitJmpTable("dvmAsmAltInstructionStart", alt_label_prefix);
+
+def genaltop(tokens):
+ if in_op_start != 2:
+ raise DataParseError("alt-op can be specified only after op-end")
+ if len(tokens) != 1:
+ raise DataParseError("opEnd takes no arguments")
if generate_alt_table:
loadAndEmitAltOpcodes()
if style == "jump-table":
@@ -307,7 +319,6 @@ def loadAndEmitOpcodes():
asm_fp.write(" .balign 4\n")
asm_fp.write("dvmAsmSisterStart:\n")
asm_fp.writelines(sister_list)
-
asm_fp.write("\n .size dvmAsmSisterStart, .-dvmAsmSisterStart\n")
asm_fp.write(" .global dvmAsmSisterEnd\n")
asm_fp.write("dvmAsmSisterEnd:\n\n")
@@ -593,6 +604,10 @@ try:
opEntry(tokens)
elif tokens[0] == "handler-style":
setHandlerStyle(tokens)
+ elif tokens[0] == "alt-ops":
+ genaltop(tokens)
+ elif tokens[0] == "split-ops":
+ splitops = True
else:
raise DataParseError, "unrecognized command '%s'" % tokens[0]
if style == None:
diff --git a/vm/mterp/mips/ALT_OP_DISPATCH_FF.S b/vm/mterp/mips/ALT_OP_DISPATCH_FF.S
new file mode 100644
index 000000000..0c542a03f
--- /dev/null
+++ b/vm/mterp/mips/ALT_OP_DISPATCH_FF.S
@@ -0,0 +1,10 @@
+%verify "executed"
+/*
+ * Unlike other alt stubs, we don't want to call dvmCheckBefore() here.
+ * Instead, just treat this as a trampoline to reach the real alt
+ * handler (which will do the dvmCheckBefore() call.
+ */
+ mov ip, rINST, lsr #8 @ ip<- extended opcode
+ add ip, ip, #256 @ add offset for extended opcodes
+ GOTO_OPCODE(ip) @ go to proper extended handler
+
diff --git a/vm/mterp/mips/OP_ADD_DOUBLE.S b/vm/mterp/mips/OP_ADD_DOUBLE.S
new file mode 100644
index 000000000..1d5cebc1f
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__adddf3)", "instr_f":"add.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_ADD_DOUBLE_2ADDR.S
new file mode 100644
index 000000000..499961f0f
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__adddf3)", "instr_f":"add.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_FLOAT.S b/vm/mterp/mips/OP_ADD_FLOAT.S
new file mode 100644
index 000000000..18c94f442
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__addsf3)", "instr_f":"add.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_FLOAT_2ADDR.S b/vm/mterp/mips/OP_ADD_FLOAT_2ADDR.S
new file mode 100644
index 000000000..0a3977090
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__addsf3)", "instr_f":"add.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_ADD_INT.S b/vm/mterp/mips/OP_ADD_INT.S
new file mode 100644
index 000000000..dcbbb7e9e
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_INT_2ADDR.S b/vm/mterp/mips/OP_ADD_INT_2ADDR.S
new file mode 100644
index 000000000..8bb3b0c50
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_INT_LIT16.S b/vm/mterp/mips/OP_ADD_INT_LIT16.S
new file mode 100644
index 000000000..de45f81dc
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_INT_LIT8.S b/vm/mterp/mips/OP_ADD_INT_LIT8.S
new file mode 100644
index 000000000..feaaaa2ce
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"addu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_ADD_LONG.S b/vm/mterp/mips/OP_ADD_LONG.S
new file mode 100644
index 000000000..d57e1cf08
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_LONG.S
@@ -0,0 +1,10 @@
+%verify "executed"
+/*
+ * The compiler generates the following sequence for
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu a1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,a1
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/vm/mterp/mips/OP_ADD_LONG_2ADDR.S b/vm/mterp/mips/OP_ADD_LONG_2ADDR.S
new file mode 100644
index 000000000..6a87119b9
--- /dev/null
+++ b/vm/mterp/mips/OP_ADD_LONG_2ADDR.S
@@ -0,0 +1,5 @@
+%verify "executed"
+/*
+ *See OP_ADD_LONG.S for details
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"addu v0, a2, a0", "instr":"addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1" }
diff --git a/vm/mterp/mips/OP_AGET.S b/vm/mterp/mips/OP_AGET.S
new file mode 100644
index 000000000..e1b182a7c
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET.S
@@ -0,0 +1,31 @@
+%default { "load":"lw", "shift":"2" }
+%verify "executed"
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_C(a3, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ .if $shift
+ EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $load a2, offArrayObject_contents(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
diff --git a/vm/mterp/mips/OP_AGET_BOOLEAN.S b/vm/mterp/mips/OP_AGET_BOOLEAN.S
new file mode 100644
index 000000000..d38c466bb
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lbu", "shift":"0" }
diff --git a/vm/mterp/mips/OP_AGET_BYTE.S b/vm/mterp/mips/OP_AGET_BYTE.S
new file mode 100644
index 000000000..2c0b0be71
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lb", "shift":"0" }
diff --git a/vm/mterp/mips/OP_AGET_CHAR.S b/vm/mterp/mips/OP_AGET_CHAR.S
new file mode 100644
index 000000000..9146b9766
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lhu", "shift":"1" }
diff --git a/vm/mterp/mips/OP_AGET_OBJECT.S b/vm/mterp/mips/OP_AGET_OBJECT.S
new file mode 100644
index 000000000..16d500d5d
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S"
diff --git a/vm/mterp/mips/OP_AGET_SHORT.S b/vm/mterp/mips/OP_AGET_SHORT.S
new file mode 100644
index 000000000..ba4c939f5
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_AGET.S" { "load":"lh", "shift":"1" }
diff --git a/vm/mterp/mips/OP_AGET_WIDE.S b/vm/mterp/mips/OP_AGET_WIDE.S
new file mode 100644
index 000000000..896ea4f58
--- /dev/null
+++ b/vm/mterp/mips/OP_AGET_WIDE.S
@@ -0,0 +1,27 @@
+%verify "executed"
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+.L${opcode}_finish:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64_off(a2, a3, a0, offArrayObject_contents)
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a2, a3, rOBJ) # vAA/vAA+1 <- a2/a3
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_AND_INT.S b/vm/mterp/mips/OP_AND_INT.S
new file mode 100644
index 000000000..721129b83
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_INT_2ADDR.S b/vm/mterp/mips/OP_AND_INT_2ADDR.S
new file mode 100644
index 000000000..4563705ee
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_INT_LIT16.S b/vm/mterp/mips/OP_AND_INT_LIT16.S
new file mode 100644
index 000000000..81c0a0461
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_INT_LIT8.S b/vm/mterp/mips/OP_AND_INT_LIT8.S
new file mode 100644
index 000000000..61c1c9d25
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"and a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_AND_LONG.S b/vm/mterp/mips/OP_AND_LONG.S
new file mode 100644
index 000000000..8249617bb
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_AND_LONG_2ADDR.S b/vm/mterp/mips/OP_AND_LONG_2ADDR.S
new file mode 100644
index 000000000..f9bf88f0e
--- /dev/null
+++ b/vm/mterp/mips/OP_AND_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide2addr.S" {"preinstr":"and a0, a0, a2", "instr":"and a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_APUT.S b/vm/mterp/mips/OP_APUT.S
new file mode 100644
index 000000000..7839b6938
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT.S
@@ -0,0 +1,27 @@
+%default { "store":"sw", "shift":"2" }
+%verify "executed"
+ /*
+ * Array put, 32 bits or less. vBB[vCC] <- vAA.
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_C(a3, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ .if $shift
+ EASN(a0, a0, a1, $shift) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, rOBJ) # a2 <- vAA
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ $store a2, offArrayObject_contents(a0) # vBB[vCC] <- a2
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_APUT_BOOLEAN.S b/vm/mterp/mips/OP_APUT_BOOLEAN.S
new file mode 100644
index 000000000..eeb974775
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sb", "shift":"0" }
diff --git a/vm/mterp/mips/OP_APUT_BYTE.S b/vm/mterp/mips/OP_APUT_BYTE.S
new file mode 100644
index 000000000..eeb974775
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sb", "shift":"0" }
diff --git a/vm/mterp/mips/OP_APUT_CHAR.S b/vm/mterp/mips/OP_APUT_CHAR.S
new file mode 100644
index 000000000..4c57fb138
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sh", "shift":"1" }
diff --git a/vm/mterp/mips/OP_APUT_OBJECT.S b/vm/mterp/mips/OP_APUT_OBJECT.S
new file mode 100644
index 000000000..1d5b06e50
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_OBJECT.S
@@ -0,0 +1,50 @@
+%verify "executed"
+ /*
+ * Store an object into an array. vBB[vCC] <- vAA.
+ *
+ */
+ /* op vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t1) # t1 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(rINST, a2) # rINST <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ GET_VREG(rBIX, t1) # rBIX <- vAA
+ # null array object?
+ beqz rINST, common_errNullObject # yes, bail
+
+ LOAD_base_offArrayObject_length(a3, rINST) # a3 <- arrayObj->length
+ EAS2(rOBJ, rINST, a1) # rOBJ <- arrayObj + index*width
+ # compare unsigned index, length
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ /*
+ * On entry:
+ * rINST = vBB (arrayObj)
+ * rBIX = vAA (obj)
+ * rOBJ = offset into array (vBB + vCC * width)
+ */
+ bnez rBIX, .L${opcode}_checks # yes, skip type checks
+.L${opcode}_finish:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ sw rBIX, offArrayObject_contents(rOBJ) # vBB[vCC] <- vAA
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%break
+.L${opcode}_checks:
+ LOAD_base_offObject_clazz(a0, rBIX) # a0 <- obj->clazz
+ LOAD_base_offObject_clazz(a1, rINST) # a1 <- arrayObj->clazz
+ JAL(dvmCanPutArrayElement) # test object type vs. array type
+ beqz v0, .L${opcode}_throw # okay ?
+ lw a2, offThread_cardTable(rSELF)
+ srl t1, rINST, GC_CARD_SHIFT
+ addu t2, a2, t1
+ sb a2, (t2)
+ b .L${opcode}_finish # yes, skip type checks
+.L${opcode}_throw:
+ LOAD_base_offObject_clazz(a0, rBIX) # a0 <- obj->clazz
+ LOAD_base_offObject_clazz(a1, rINST) # a1 <- arrayObj->clazz
+ EXPORT_PC()
+ JAL(dvmThrowArrayStoreExceptionIncompatibleElement)
+ b common_exceptionThrown
diff --git a/vm/mterp/mips/OP_APUT_SHORT.S b/vm/mterp/mips/OP_APUT_SHORT.S
new file mode 100644
index 000000000..4c57fb138
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_APUT.S" { "store":"sh", "shift":"1" }
diff --git a/vm/mterp/mips/OP_APUT_WIDE.S b/vm/mterp/mips/OP_APUT_WIDE.S
new file mode 100644
index 000000000..0046cd511
--- /dev/null
+++ b/vm/mterp/mips/OP_APUT_WIDE.S
@@ -0,0 +1,27 @@
+%verify "executed"
+ /*
+ * Array put, 64 bits. vBB[vCC] <- vAA.
+ *
+ * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
+ */
+ /* aput-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t0) # t0 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*width
+ EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
+ # compare unsigned index, length
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a2, a3, rOBJ) # a2/a3 <- vAA/vAA+1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64_off(a2, a3, a0, offArrayObject_contents) # a2/a3 <- vBB[vCC]
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_ARRAY_LENGTH.S b/vm/mterp/mips/OP_ARRAY_LENGTH.S
new file mode 100644
index 000000000..94160118b
--- /dev/null
+++ b/vm/mterp/mips/OP_ARRAY_LENGTH.S
@@ -0,0 +1,14 @@
+%verify "executed"
+ /*
+ * Return the length of an array.
+ */
+ GET_OPB(a1) # a1 <- B
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a0, a1) # a0 <- vB (object ref)
+ # is object null?
+ beqz a0, common_errNullObject # yup, fail
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- array length
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a3, a2, t0) # vA <- length
+
diff --git a/vm/mterp/mips/OP_BREAKPOINT.S b/vm/mterp/mips/OP_BREAKPOINT.S
new file mode 100644
index 000000000..36248107f
--- /dev/null
+++ b/vm/mterp/mips/OP_BREAKPOINT.S
@@ -0,0 +1,15 @@
+%verify "executed"
+ /*
+ * Breakpoint handler.
+ *
+ * Restart this instruction with the original opcode. By
+ * the time we get here, the breakpoint will have already been
+ * handled.
+ */
+ move a0, rPC
+ JAL(dvmGetOriginalOpcode) # (rPC)
+ FETCH(rINST, 0) # reload OP_BREAKPOINT + rest of inst
+ lw a1, offThread_mainHandlerTable(rSELF)
+ and rINST, 0xff00
+ or rINST, rINST, a0
+ GOTO_OPCODE_BASE(a1, a0)
diff --git a/vm/mterp/mips/OP_CHECK_CAST.S b/vm/mterp/mips/OP_CHECK_CAST.S
new file mode 100644
index 000000000..f29a51f1e
--- /dev/null
+++ b/vm/mterp/mips/OP_CHECK_CAST.S
@@ -0,0 +1,71 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ # check-cast vAA, class /* BBBB */
+ GET_OPA(a3) # a3 <- AA
+ FETCH(a2, 1) # a2 <- BBBB
+ GET_VREG(rOBJ, a3) # rOBJ <- object
+ LOAD_rSELF_methodClassDex(a0) # a0 <- pDvmDex
+ LOAD_base_offDvmDex_pResClasses(a0, a0) # a0 <- pDvmDex->pResClasses
+ # is object null?
+ beqz rOBJ, .L${opcode}_okay # null obj, cast always succeeds
+ LOAD_eas2(a1, a0, a2) # a1 <- resolved class
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- obj->clazz
+ # have we resolved this before?
+ beqz a1, .L${opcode}_resolve # not resolved, do it now
+.L${opcode}_resolved:
+ # same class (trivial success)?
+ bne a0, a1, .L${opcode}_fullcheck # no, do full check
+.L${opcode}_okay:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from BBBB
+ * rOBJ holds object
+ */
+.L${opcode}_fullcheck:
+ move rBIX,a1 # avoid ClassObject getting clobbered
+ JAL(dvmInstanceofNonTrivial) # v0 <- boolean result
+ # failed?
+ bnez v0, .L${opcode}_okay # no, success
+ b .L${opcode}_castfailure
+%break
+
+.L${opcode}_castfailure:
+ # A cast has failed. We need to throw a ClassCastException with the
+ # class of the object that failed to be cast.
+ EXPORT_PC() # about to throw
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- obj->clazz
+ move a1,rBIX # r1<- desired class
+ JAL(dvmThrowClassCastException)
+ b common_exceptionThrown
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a2 holds BBBB
+ * rOBJ holds object
+ */
+.L${opcode}_resolve:
+ EXPORT_PC() # resolve() could throw
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ move a1, a2 # a1 <- BBBB
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- resolved ClassObject ptr
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ move a1, v0 # a1 <- class resolved from BBB
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- obj->clazz
+ b .L${opcode}_resolved # pick up where we left off
diff --git a/vm/mterp/mips/OP_CHECK_CAST_JUMBO.S b/vm/mterp/mips/OP_CHECK_CAST_JUMBO.S
new file mode 100644
index 000000000..966ffab9e
--- /dev/null
+++ b/vm/mterp/mips/OP_CHECK_CAST_JUMBO.S
@@ -0,0 +1,84 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ /* check-cast/ jumbo vBBBB, class #AAAAAAAA */
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a3, 3) # a3<- BBBB
+ sll a2,a2,16
+ or a2, a0, a2 # a2<- AAAAaaaa
+
+ GET_VREG(rOBJ, a3) # rOBJ<- object
+ LOAD_rSELF_methodClassDex(a0) # a0<- pDvmDex
+ LOAD_base_offDvmDex_pResClasses(a0, a0) # a0<- pDvmDex->pResClasses
+ # is object null?
+ beqz rOBJ, .L${opcode}_okay # null obj, cast always succeeds
+ LOAD_eas2(a1, a0, a2) # a1<- resolved class
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0<- obj->clazz
+ # have we resolved this before?
+ beqz a1, .L${opcode}_resolve # not resolved, do it now
+.L${opcode}_resolved:
+ # same class (trivial success)?
+ bne a0, a1, .L${opcode}_fullcheck # no, do full check
+ b .L${opcode}_okay # yes, finish up
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from BBBB
+ * rOBJ holds object
+ */
+.L${opcode}_fullcheck:
+ move rBIX,a1 # avoid ClassObject getting clobbered
+ JAL(dvmInstanceofNonTrivial) # v0<- boolean result
+ # failed?
+ bnez v0, .L${opcode}_okay # no, success
+ b .L${opcode}_castfailure
+
+%break
+
+
+.L${opcode}_castfailure:
+ # A cast has failed. We need to throw a ClassCastException with the
+ # class of the object that failed to be cast.
+ EXPORT_PC() # about to throw
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0<- obj->clazz
+ move a1,rBIX # r1<- desired class
+ JAL(dvmThrowClassCastException)
+ b common_exceptionThrown
+
+ /*
+ * Advance PC and get next opcode
+ *
+ */
+.L${opcode}_okay:
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a2 holds AAAAAAAA
+ * rOBJ holds object
+ */
+.L${opcode}_resolve:
+ EXPORT_PC() # resolve() could throw
+ LOAD_rSELF_method(a3) # a3<- self->method
+ move a1, a2 # a1<- AAAAAAAA
+ li a2, 0 # a2<- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0<- method->clazz
+ JAL(dvmResolveClass) # v0<- resolved ClassObject ptr
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ move a1, v0 # a1<- class resolved from AAAAAAAA
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0<- obj->clazz
+ b .L${opcode}_resolved # pick up where we left off
+
+
diff --git a/vm/mterp/mips/OP_CMPG_DOUBLE.S b/vm/mterp/mips/OP_CMPG_DOUBLE.S
new file mode 100644
index 000000000..8e740e3e4
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPG_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_CMPL_DOUBLE.S" { "naninst":"li rTEMP, 1" }
diff --git a/vm/mterp/mips/OP_CMPG_FLOAT.S b/vm/mterp/mips/OP_CMPG_FLOAT.S
new file mode 100644
index 000000000..2c4e97bf0
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPG_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_CMPL_FLOAT.S" { "naninst":"li rTEMP, 1" }
diff --git a/vm/mterp/mips/OP_CMPL_DOUBLE.S b/vm/mterp/mips/OP_CMPL_DOUBLE.S
new file mode 100644
index 000000000..63bb0055e
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPL_DOUBLE.S
@@ -0,0 +1,70 @@
+%default { "naninst":"li rTEMP, -1" }
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and rOBJ, a0, 255 # s0 <- BB
+ srl rBIX, a0, 8 # t0 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s0 <- &fp[BB]
+ EAS2(rBIX, rFP, rBIX) # t0 <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, rBIX) # a2/a3 <- vCC/vCC+1
+ JAL(__eqdf2) # cmp <=: C clear if <, Z set if eq
+ li rTEMP, 0
+ beqz v0, ${opcode}_finish
+
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, rBIX) # a2/a3 <- vCC/vCC+1
+ JAL(__ltdf2)
+ li rTEMP, -1
+ bltz v0, ${opcode}_finish
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ b ${opcode}_continue
+#else
+ LOAD64_F(fs0, fs0f, rOBJ)
+ LOAD64_F(fs1, fs1f, rBIX)
+ c.olt.d fcc0, fs0, fs1
+ li rTEMP, -1
+ bc1t fcc0, ${opcode}_finish
+ c.olt.d fcc0, fs1, fs0
+ li rTEMP, 1
+ bc1t fcc0, ${opcode}_finish
+ c.eq.d fcc0, fs0, fs1
+ li rTEMP, 0
+ bc1t fcc0, ${opcode}_finish
+ b ${opcode}_nan
+#endif
+%break
+
+${opcode}_nan:
+ $naninst
+ b ${opcode}_finish
+
+#ifdef SOFT_FLOAT
+${opcode}_continue:
+ LOAD64(rARG2, rARG3, rBIX) # a2/a3 <- vCC/vCC+1
+ JAL(__gtdf2) # fallthru
+ li rTEMP, 1 # rTEMP = 1 if v0 != 0
+ blez v0, ${opcode}_nan # fall thru for finish
+#endif
+
+${opcode}_finish:
+ GET_OPA(rOBJ)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(rTEMP, rOBJ, t0) # vAA <- rTEMP
diff --git a/vm/mterp/mips/OP_CMPL_FLOAT.S b/vm/mterp/mips/OP_CMPL_FLOAT.S
new file mode 100644
index 000000000..6e070844d
--- /dev/null
+++ b/vm/mterp/mips/OP_CMPL_FLOAT.S
@@ -0,0 +1,82 @@
+%default { "naninst":"li rTEMP, -1" }
+%verify "executed"
+%verify "basic lt, gt, eq */
+%verify "left arg NaN"
+%verify "right arg NaN"
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+
+ /* "clasic" form */
+ FETCH(a0, 1) # a0 <- CCBB
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8
+#ifdef SOFT_FLOAT
+ GET_VREG(rOBJ, a2) # rOBJ <- vBB
+ GET_VREG(rBIX, a3) # rBIX <- vCC
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ JAL(__eqsf2) # a0 <- (vBB == vCC)
+ li rTEMP, 0 # set rTEMP to 0
+ beqz v0, ${opcode}_finish
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ JAL(__ltsf2) # a0 <- (vBB < vCC)
+ li rTEMP, -1
+ bltz v0, ${opcode}_finish
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ b ${opcode}_continue
+#else
+ GET_VREG_F(fs0, a2)
+ GET_VREG_F(fs1, a3)
+ c.olt.s fcc0, fs0, fs1 # Is fs0 < fs1
+ li rTEMP, -1
+ bc1t fcc0, ${opcode}_finish
+ c.olt.s fcc0, fs1, fs0
+ li rTEMP, 1
+ bc1t fcc0, ${opcode}_finish
+ c.eq.s fcc0, fs0, fs1
+ li rTEMP, 0
+ bc1t fcc0, ${opcode}_finish
+ b ${opcode}_nan
+
+#endif
+
+%break
+
+${opcode}_nan:
+ $naninst
+ b ${opcode}_finish
+
+#ifdef SOFT_FLOAT
+${opcode}_continue:
+ JAL(__gtsf2) # v0 <- (vBB > vCC)
+ li rTEMP, 1 # rTEMP = 1 if v0 != 0
+ bgtz v0, ${opcode}_finish
+ b ${opcode}_nan
+#endif
+
+${opcode}_finish:
+ GET_OPA(t0)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ SET_VREG(rTEMP, t0) # vAA <- rTEMP
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0)
diff --git a/vm/mterp/mips/OP_CMP_LONG.S b/vm/mterp/mips/OP_CMP_LONG.S
new file mode 100644
index 000000000..fcdfce7da
--- /dev/null
+++ b/vm/mterp/mips/OP_CMP_LONG.S
@@ -0,0 +1,40 @@
+%verify "executed"
+%verify "basic lt, gt, eq"
+%verify "hi equal, lo <=>"
+%verify "lo equal, hi <=>"
+ /*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * I think I can improve on the ARM code by the following observation
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+ * sgt t1, x.hi, y.hi; # (y.hi > x.hi) ? 1:0
+ * subu v0, t0, t1 # v0= -1:1:0 for [ < > = ]
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(a3, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ slt t0, a1, a3 # compare hi
+ sgt t1, a1, a3
+ subu v0, t1, t0 # v0 <- (-1, 1, 0)
+ bnez v0, .L${opcode}_finish
+ # at this point x.hi==y.hi
+ sltu t0, a0, a2 # compare lo
+ sgtu t1, a0, a2
+ subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
+
+.L${opcode}_finish:
+ SET_VREG(v0, rOBJ) # vAA <- v0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_CONST.S b/vm/mterp/mips/OP_CONST.S
new file mode 100644
index 000000000..309b52a03
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST.S
@@ -0,0 +1,11 @@
+%verify "executed"
+ # const vAA, /* +BBBBbbbb */
+ GET_OPA(a3) # a3 <- AA
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a1, a1, 16
+ or a0, a1, a0 # a0 <- BBBBbbbb
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
diff --git a/vm/mterp/mips/OP_CONST_16.S b/vm/mterp/mips/OP_CONST_16.S
new file mode 100644
index 000000000..69732f4d4
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_16.S
@@ -0,0 +1,8 @@
+%verify "executed"
+ # const/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
diff --git a/vm/mterp/mips/OP_CONST_4.S b/vm/mterp/mips/OP_CONST_4.S
new file mode 100644
index 000000000..833e373fe
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_4.S
@@ -0,0 +1,10 @@
+%verify "executed"
+ # const/4 vA, /* +B */
+ sll a1, rINST, 16 # a1 <- Bxxx0000
+ GET_OPA(a0) # a0 <- A+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
+ and a0, a0, 15
+ GET_INST_OPCODE(t0) # ip <- opcode from rINST
+ SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
+
diff --git a/vm/mterp/mips/OP_CONST_CLASS.S b/vm/mterp/mips/OP_CONST_CLASS.S
new file mode 100644
index 000000000..f63d7c3eb
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_CLASS.S
@@ -0,0 +1,31 @@
+%verify "executed"
+%verify "Class already resolved"
+%verify "Class not yet resolved"
+%verify "Class cannot be resolved"
+ # const/class vAA, Class /* BBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ GET_OPA(rOBJ) # rOBJ <- AA
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- dvmDex->pResClasses
+ LOAD_eas2(v0, a2, a1) # v0 <- pResClasses[BBBB]
+
+ bnez v0, .L${opcode}_resolve # v0!=0 => resolved-ok
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * a1: BBBB (Class ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ li a2, 1 # a2 <- true
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- Class reference
+ # failed==0?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.L${opcode}_resolve:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
+
+
diff --git a/vm/mterp/mips/OP_CONST_CLASS_JUMBO.S b/vm/mterp/mips/OP_CONST_CLASS_JUMBO.S
new file mode 100644
index 000000000..05604b915
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_CLASS_JUMBO.S
@@ -0,0 +1,34 @@
+%verify "executed"
+%verify "Class already resolved"
+%verify "Class not yet resolved"
+%verify "Class cannot be resolved"
+ /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- dvmDex->pResClasses
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ FETCH(rOBJ, 3) # rOBJ<- BBBB
+ LOAD_eas2(v0, a2, a1) # v0 <- pResClasses[BBBB]
+
+ bnez v0, .L${opcode}_resolve # v0!=0 => resolved-ok
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * a1: AAAAAAAA (Class ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ li a2, 1 # a2 <- true
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- Class reference
+ # failed==0?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.L${opcode}_resolve:
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vBBBB <- v0
+
+
diff --git a/vm/mterp/mips/OP_CONST_HIGH16.S b/vm/mterp/mips/OP_CONST_HIGH16.S
new file mode 100644
index 000000000..04c6d5d4f
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_HIGH16.S
@@ -0,0 +1,9 @@
+%verify "executed"
+ # const/high16 vAA, /* +BBBB0000 */
+ FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ sll a0, a0, 16 # a0 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
diff --git a/vm/mterp/mips/OP_CONST_STRING.S b/vm/mterp/mips/OP_CONST_STRING.S
new file mode 100644
index 000000000..f59b1d69c
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_STRING.S
@@ -0,0 +1,33 @@
+%verify "executed"
+%verify "String already resolved"
+%verify "String not yet resolved"
+%verify "String cannot be resolved"
+ # const/string vAA, String /* BBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ GET_OPA(rOBJ) # rOBJ <- AA
+ LOAD_base_offDvmDex_pResStrings(a2, a2) # a2 <- dvmDex->pResStrings
+ LOAD_eas2(v0, a2, a1) # v0 <- pResStrings[BBBB]
+ # not yet resolved?
+ bnez v0, .L${opcode}_resolve
+ /*
+ * Continuation if the String has not yet been resolved.
+ * a1: BBBB (String ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveString) # v0 <- String reference
+ # failed?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.L${opcode}_resolve:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
+
+
+
+
+
diff --git a/vm/mterp/mips/OP_CONST_STRING_JUMBO.S b/vm/mterp/mips/OP_CONST_STRING_JUMBO.S
new file mode 100644
index 000000000..0c3d0bd8f
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_STRING_JUMBO.S
@@ -0,0 +1,32 @@
+%verify "executed"
+%verify "String already resolved"
+%verify "String not yet resolved"
+%verify "String cannot be resolved"
+ # const/string vAA, String /* BBBBBBBB */
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (high)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ GET_OPA(rOBJ) # rOBJ <- AA
+ LOAD_base_offDvmDex_pResStrings(a2, a2) # a2 <- dvmDex->pResStrings
+ sll a1, a1, 16
+ or a1, a1, a0 # a1 <- BBBBbbbb
+ LOAD_eas2(v0, a2, a1) # v0 <- pResStrings[BBBB]
+ bnez v0, .L${opcode}_resolve
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * a1: BBBBBBBB (String ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveString) # v0 <- String reference
+ # failed?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.L${opcode}_resolve:
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t1) # vAA <- v0
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE.S b/vm/mterp/mips/OP_CONST_WIDE.S
new file mode 100644
index 000000000..ba1c462b3
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE.S
@@ -0,0 +1,17 @@
+%verify "executed"
+ # const-wide vAA, /* +HHHHhhhhBBBBbbbb */
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (low middle)
+ FETCH(a2, 3) # a2 <- hhhh (high middle)
+ sll a1, 16 #
+ or a0, a1 # a0 <- BBBBbbbb (low word)
+ FETCH(a3, 4) # a3 <- HHHH (high)
+ GET_OPA(t1) # t1 <- AA
+ sll a3, 16
+ or a1, a3, a2 # a1 <- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ EAS2(t1, rFP, t1) # t1 <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, t1) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE_16.S b/vm/mterp/mips/OP_CONST_WIDE_16.S
new file mode 100644
index 000000000..d43152990
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE_16.S
@@ -0,0 +1,11 @@
+%verify "executed"
+ # const-wide/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ sra a1, a0, 31 # a1 <- ssssssss
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a3) # a3 <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE_32.S b/vm/mterp/mips/OP_CONST_WIDE_32.S
new file mode 100644
index 000000000..9cb9a3fa8
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE_32.S
@@ -0,0 +1,14 @@
+%verify "executed"
+ # const-wide/32 vAA, /* +BBBBbbbb */
+ FETCH(a0, 1) # a0 <- 0000bbbb (low)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a2, a2, 16
+ or a0, a0, a2 # a0 <- BBBBbbbb
+ EAS2(a3, rFP, a3) # a3 <- &fp[AA]
+ sra a1, a0, 31 # a1 <- ssssssss
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_CONST_WIDE_HIGH16.S b/vm/mterp/mips/OP_CONST_WIDE_HIGH16.S
new file mode 100644
index 000000000..c56cd268f
--- /dev/null
+++ b/vm/mterp/mips/OP_CONST_WIDE_HIGH16.S
@@ -0,0 +1,12 @@
+%verify "executed"
+ # const-wide/high16 vAA, /* +BBBB000000000000 */
+ FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ li a0, 0 # a0 <- 00000000
+ sll a1, 16 # a1 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a3) # a3 <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_DISPATCH_FF.S b/vm/mterp/mips/OP_DISPATCH_FF.S
new file mode 100644
index 000000000..0503c336f
--- /dev/null
+++ b/vm/mterp/mips/OP_DISPATCH_FF.S
@@ -0,0 +1,4 @@
+%verify "executed"
+ srl t0, rINST, 8 # t0<- extended opcode
+ addu t0, t0, 256 # add offset for extended opcodes
+ GOTO_OPCODE(t0) # go to proper extended handler
diff --git a/vm/mterp/mips/OP_DIV_DOUBLE.S b/vm/mterp/mips/OP_DIV_DOUBLE.S
new file mode 100644
index 000000000..a7e030211
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__divdf3)", "instr_f":"div.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_DIV_DOUBLE_2ADDR.S
new file mode 100644
index 000000000..18e28d797
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__divdf3)", "instr_f":"div.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_FLOAT.S b/vm/mterp/mips/OP_DIV_FLOAT.S
new file mode 100644
index 000000000..59bb8d6e1
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__divsf3)", "instr_f":"div.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_FLOAT_2ADDR.S b/vm/mterp/mips/OP_DIV_FLOAT_2ADDR.S
new file mode 100644
index 000000000..a0a546f5c
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__divsf3)", "instr_f":"div.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_DIV_INT.S b/vm/mterp/mips/OP_DIV_INT.S
new file mode 100644
index 000000000..b8454759e
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_INT_2ADDR.S b/vm/mterp/mips/OP_DIV_INT_2ADDR.S
new file mode 100644
index 000000000..1f13ad801
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_INT_LIT16.S b/vm/mterp/mips/OP_DIV_INT_LIT16.S
new file mode 100644
index 000000000..d75d21057
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_INT_LIT8.S b/vm/mterp/mips/OP_DIV_INT_LIT8.S
new file mode 100644
index 000000000..384eb0d2c
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"div zero, a0, a1; mflo a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_DIV_LONG.S b/vm/mterp/mips/OP_DIV_LONG.S
new file mode 100644
index 000000000..bb39d2ad3
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_LONG.S
@@ -0,0 +1,6 @@
+%verify "executed"
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide.S" { "arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#endif
diff --git a/vm/mterp/mips/OP_DIV_LONG_2ADDR.S b/vm/mterp/mips/OP_DIV_LONG_2ADDR.S
new file mode 100644
index 000000000..8e751b62d
--- /dev/null
+++ b/vm/mterp/mips/OP_DIV_LONG_2ADDR.S
@@ -0,0 +1,6 @@
+%verify "executed"
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide2addr.S" {"result0":"v0", "result1":"v1", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide2addr.S" {"arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__divdi3)", "chkzero":"1"}
+#endif
diff --git a/vm/mterp/mips/OP_DOUBLE_TO_FLOAT.S b/vm/mterp/mips/OP_DOUBLE_TO_FLOAT.S
new file mode 100644
index 000000000..f1e04ea16
--- /dev/null
+++ b/vm/mterp/mips/OP_DOUBLE_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopNarrower.S" {"instr":"JAL(__truncdfsf2)", "instr_f":"cvt.s.d fv0, fa0"}
diff --git a/vm/mterp/mips/OP_DOUBLE_TO_INT.S b/vm/mterp/mips/OP_DOUBLE_TO_INT.S
new file mode 100644
index 000000000..33199c4b1
--- /dev/null
+++ b/vm/mterp/mips/OP_DOUBLE_TO_INT.S
@@ -0,0 +1,80 @@
+%verify "executed"
+%include "mips/unopNarrower.S" {"instr":"b d2i_doconv", "instr_f":"b d2i_doconv"}
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to a modest integer. The EABI convert function isn't doing this for us.
+ * Use rBIX / rTEMP as global to hold arguments (they are not bound to a global var)
+ */
+%break
+
+
+d2i_doconv:
+#ifdef SOFT_FLOAT
+ la t0, .LDOUBLE_TO_INT_max
+ LOAD64(rARG2, rARG3, t0)
+ move rBIX, rARG0 # save a0
+ move rTEMP, rARG1 # and a1
+ JAL(__gedf2) # is arg >= maxint?
+
+ move t0, v0
+ li v0, ~0x80000000 # return maxint (7fffffff)
+ bgez t0, .L${opcode}_set_vreg # nonzero == yes
+
+ move rARG0, rBIX # recover arg
+ move rARG1, rTEMP
+ la t0, .LDOUBLE_TO_INT_min
+ LOAD64(rARG2, rARG3, t0)
+ JAL(__ledf2) # is arg <= minint?
+
+ move t0, v0
+ li v0, 0x80000000 # return minint (80000000)
+ blez t0, .L${opcode}_set_vreg # nonzero == yes
+
+ move rARG0, rBIX # recover arg
+ move rARG1, rTEMP
+ move rARG2, rBIX # compare against self
+ move rARG3, rTEMP
+ JAL(__nedf2) # is arg == self?
+
+ move t0, v0 # zero == no
+ li v0, 0
+ bnez t0, .L${opcode}_set_vreg # return zero for NaN
+
+ move rARG0, rBIX # recover arg
+ move rARG1, rTEMP
+ JAL(__fixdfsi) # convert double to int
+ b .L${opcode}_set_vreg
+#else
+ la t0, .LDOUBLE_TO_INT_max
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa1, fa0
+ l.s fv0, .LDOUBLE_TO_INT_maxret
+ bc1t .L${opcode}_set_vreg_f
+
+ la t0, .LDOUBLE_TO_INT_min
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa0, fa1
+ l.s fv0, .LDOUBLE_TO_INT_minret
+ bc1t .L${opcode}_set_vreg_f
+
+ mov.d fa1, fa0
+ c.un.d fcc0, fa0, fa1
+ li.s fv0, 0
+ bc1t .L${opcode}_set_vreg_f
+
+ trunc.w.d fv0, fa0
+ b .L${opcode}_set_vreg_f
+#endif
+
+
+.LDOUBLE_TO_INT_max:
+ .dword 0x41dfffffffc00000
+.LDOUBLE_TO_INT_min:
+    .dword 0xc1e0000000000000              #  minint, as a double
+.LDOUBLE_TO_INT_maxret:
+ .word 0x7fffffff
+.LDOUBLE_TO_INT_minret:
+ .word 0x80000000
diff --git a/vm/mterp/mips/OP_DOUBLE_TO_LONG.S b/vm/mterp/mips/OP_DOUBLE_TO_LONG.S
new file mode 100644
index 000000000..153d557c3
--- /dev/null
+++ b/vm/mterp/mips/OP_DOUBLE_TO_LONG.S
@@ -0,0 +1,76 @@
+%verify "executed"
+%include "mips/unflopWide.S" {"instr":"b d2l_doconv", "st_result":"STORE64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+d2l_doconv:
+#ifdef SOFT_FLOAT
+ la t0, .LDOUBLE_TO_LONG_max
+ LOAD64(rARG2, rARG3, t0)
+ move rBIX, rARG0 # save a0
+ move rTEMP, rARG1 # and a1
+ JAL(__gedf2)
+
+ move t1, v0
+ la t0, .LDOUBLE_TO_LONG_ret_max
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bgez t1, .L${opcode}_set_vreg
+
+ move rARG0, rBIX
+ move rARG1, rTEMP
+ la t0, .LDOUBLE_TO_LONG_min
+ LOAD64(rARG2, rARG3, t0)
+ JAL(__ledf2)
+
+ move t1, v0
+ la t0, .LDOUBLE_TO_LONG_ret_min
+ LOAD64(rRESULT0, rRESULT1, t0)
+ blez t1, .L${opcode}_set_vreg
+
+ move rARG0, rBIX
+ move rARG1, rTEMP
+ move rARG2, rBIX
+ move rARG3, rTEMP
+ JAL(__nedf2)
+
+ move t0, v0
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bnez t0, .L${opcode}_set_vreg
+
+ move rARG0, rBIX
+ move rARG1, rTEMP
+ JAL(__fixdfdi)
+
+#else
+ la t0, .LDOUBLE_TO_LONG_max
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa1, fa0
+ la t0, .LDOUBLE_TO_LONG_ret_max
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1t .L${opcode}_set_vreg
+
+ la t0, .LDOUBLE_TO_LONG_min
+ LOAD64_F(fa1, fa1f, t0)
+ c.ole.d fcc0, fa0, fa1
+ la t0, .LDOUBLE_TO_LONG_ret_min
+ LOAD64(rRESULT0, rRESULT1, t0)
+ bc1t .L${opcode}_set_vreg
+
+ mov.d fa1, fa0
+ c.un.d fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1t .L${opcode}_set_vreg
+ JAL(__fixdfdi)
+#endif
+ b .L${opcode}_set_vreg
+
+
+.LDOUBLE_TO_LONG_max:
+    .dword 0x43e0000000000000              #  maxlong, as a double
+.LDOUBLE_TO_LONG_min:
+    .dword 0xc3e0000000000000              #  minlong, as a double
+.LDOUBLE_TO_LONG_ret_max:
+ .dword 0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+ .dword 0x8000000000000000
diff --git a/vm/mterp/mips/OP_EXECUTE_INLINE.S b/vm/mterp/mips/OP_EXECUTE_INLINE.S
new file mode 100644
index 000000000..cbc891711
--- /dev/null
+++ b/vm/mterp/mips/OP_EXECUTE_INLINE.S
@@ -0,0 +1,104 @@
+%verify "executed"
+%verify "exception handled"
+ /*
+ * Execute a "native inline" instruction.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in a0-a3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ *
+ * TUNING: could maintain two tables, pointer in Thread and
+ * swap if profiler/debugger active.
+ */
+ /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+ lhu a2, offThread_subMode(rSELF)
+ FETCH(rBIX, 1) # rBIX <- BBBB
+ EXPORT_PC() # can throw
+ and a2, kSubModeDebugProfile # Any going on?
+ bnez a2, .L${opcode}_debugmode # yes - take slow path
+.L${opcode}_resume:
+ addu a1, rSELF, offThread_retval # a1 <- &self->retval
+ GET_OPB(a0) # a0 <- B
+ # Stack should have 16/20 available
+ sw a1, STACK_OFFSET_ARG04(sp) # push &self->retval
+ BAL(.L${opcode}_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ # test boolean result of inline
+ beqz v0, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+%break
+
+ /*
+ * Extract args, call function.
+ * a0 = #of args (0-4)
+ * rBIX = call index
+ *
+ * Other ideas:
+ * - Use a jump table from the main piece to jump directly into the
+ * AND/LW pairs. Costs a data load, saves a branch.
+ * - Have five separate pieces that do the loading, so we can work the
+ * interleave a little better. Increases code size.
+ */
+.L${opcode}_continue:
+ FETCH(rINST, 2) # rINST <- FEDC
+ beq a0, 0, 0f
+ beq a0, 1, 1f
+ beq a0, 2, 2f
+ beq a0, 3, 3f
+ beq a0, 4, 4f
+ JAL(common_abort) # too many arguments
+
+4:
+ and t0, rINST, 0xf000 # isolate F
+ ESRN(t1, rFP, t0, 10)
+ lw a3, 0(t1) # a3 <- vF (shift right 12, left 2)
+3:
+ and t0, rINST, 0x0f00 # isolate E
+ ESRN(t1, rFP, t0, 6)
+ lw a2, 0(t1) # a2 <- vE
+2:
+ and t0, rINST, 0x00f0 # isolate D
+ ESRN(t1, rFP, t0, 2)
+ lw a1, 0(t1) # a1 <- vD
+1:
+ and t0, rINST, 0x000f # isolate C
+ EASN(t1, rFP, t0, 2)
+ lw a0, 0(t1) # a0 <- vC
+0:
+ la rINST, gDvmInlineOpsTable # table of InlineOperation
+ EAS4(t1, rINST, rBIX) # t1 <- rINST + rBIX<<4
+ lw t9, 0(t1)
+ jr t9 # sizeof=16, "func" is first entry
+ # (not reached)
+
+ /*
+ * We're debugging or profiling.
+ * rBIX: opIndex
+ */
+.L${opcode}_debugmode:
+ move a0, rBIX
+ JAL(dvmResolveInlineNative)
+ beqz v0, .L${opcode}_resume # did it resolve? no, just move on
+ move rOBJ, v0 # remember method
+ move a0, v0
+ move a1, rSELF
+ JAL(dvmFastMethodTraceEnter) # (method, self)
+ addu a1, rSELF, offThread_retval # a1<- &self->retval
+ GET_OPB(a0) # a0 <- B
+ # Stack should have 16/20 available
+ sw a1, 16(sp) # push &self->retval
+ BAL(.L${opcode}_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ move rINST, v0 # save result of inline
+ move a0, rOBJ # a0<- method
+ move a1, rSELF # a1<- self
+ JAL(dvmFastMethodTraceExit) # (method, self)
+ beqz v0, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_EXECUTE_INLINE_RANGE.S b/vm/mterp/mips/OP_EXECUTE_INLINE_RANGE.S
new file mode 100644
index 000000000..3c95a8c5e
--- /dev/null
+++ b/vm/mterp/mips/OP_EXECUTE_INLINE_RANGE.S
@@ -0,0 +1,92 @@
+%verify "executed"
+%verify "exception handled"
+ /*
+ * Execute a "native inline" instruction, using "/range" semantics.
+ * Same idea as execute-inline, but we get the args differently.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in a0-a3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ */
+ /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
+ lhu a2, offThread_subMode(rSELF)
+ FETCH(rBIX, 1) # rBIX<- BBBB
+ EXPORT_PC() # can throw
+ and a2, kSubModeDebugProfile # Any going on?
+ bnez a2, .L${opcode}_debugmode # yes - take slow path
+.L${opcode}_resume:
+ addu a1, rSELF, offThread_retval # a1<- &self->retval
+ GET_OPA(a0)
+ sw a1, STACK_OFFSET_ARG04(sp) # push &self->retval
+ BAL(.L${opcode}_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ beqz v0, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%break
+
+ /*
+ * Extract args, call function.
+ * a0 = #of args (0-4)
+ * rBIX = call index
+ * ra = return addr, above [DO NOT JAL out of here w/o preserving ra]
+ */
+.L${opcode}_continue:
+ FETCH(rOBJ, 2) # rOBJ <- CCCC
+ beq a0, 0, 0f
+ beq a0, 1, 1f
+ beq a0, 2, 2f
+ beq a0, 3, 3f
+ beq a0, 4, 4f
+ JAL(common_abort) # too many arguments
+
+4:
+ add t0, rOBJ, 3
+ GET_VREG(a3, t0)
+3:
+ add t0, rOBJ, 2
+ GET_VREG(a2, t0)
+2:
+ add t0, rOBJ, 1
+ GET_VREG(a1, t0)
+1:
+ GET_VREG(a0, rOBJ)
+0:
+ la rOBJ, gDvmInlineOpsTable # table of InlineOperation
+    EAS4(t1, rOBJ, rBIX)                   #  t1 <- rOBJ + rBIX<<4
+ lw t9, 0(t1)
+ jr t9 # sizeof=16, "func" is first entry
+ # not reached
+
+ /*
+ * We're debugging or profiling.
+ * rBIX: opIndex
+ */
+.L${opcode}_debugmode:
+ move a0, rBIX
+ JAL(dvmResolveInlineNative)
+ beqz v0, .L${opcode}_resume # did it resolve? no, just move on
+ move rOBJ, v0 # remember method
+ move a0, v0
+ move a1, rSELF
+ JAL(dvmFastMethodTraceEnter) # (method, self)
+ addu a1, rSELF, offThread_retval # a1<- &self->retval
+ GET_OPA(a0) # a0 <- A
+ # Stack should have 16/20 available
+ sw a1, 16(sp) # push &self->retval
+ move rINST, rOBJ # rINST<- method
+ BAL(.L${opcode}_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ move rOBJ, v0 # save result of inline
+ move a0, rINST # a0<- method
+ move a1, rSELF # a1<- self
+ JAL(dvmFastNativeMethodTraceExit) # (method, self)
+ beqz rOBJ, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_FILLED_NEW_ARRAY.S b/vm/mterp/mips/OP_FILLED_NEW_ARRAY.S
new file mode 100644
index 000000000..2cb225d36
--- /dev/null
+++ b/vm/mterp/mips/OP_FILLED_NEW_ARRAY.S
@@ -0,0 +1,120 @@
+%default { "isrange":"0" }
+%verify "executed"
+%verify "unimplemented array type"
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ EXPORT_PC() # need for resolve and alloc
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+ GET_OPA(rOBJ) # rOBJ <- AA or BA
+ # already resolved?
+ bnez a0, .L${opcode}_continue # yes, continue on
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .L${opcode}_continue
+%break
+
+ /*
+ * On entry:
+ * a0 holds array class
+ * rOBJ holds AA or BA
+ */
+.L${opcode}_continue:
+ LOAD_base_offClassObject_descriptor(a3, a0) # a3 <- arrayClass->descriptor
+ li a2, ALLOC_DONT_TRACK # a2 <- alloc flags
+ lbu rINST, 1(a3) # rINST <- descriptor[1]
+ .if $isrange
+ move a1, rOBJ # a1 <- AA (length)
+ .else
+    srl       a1, rOBJ, 4                  #  a1 <- B (length)
+ .endif
+ seq t0, rINST, 'I' # array of ints?
+ seq t1, rINST, 'L' # array of objects?
+ or t0, t1
+ seq t1, rINST, '[' # array of arrays?
+ or t0, t1
+ move rBIX, a1 # save length in rBIX
+ beqz t0, .L${opcode}_notimpl # no, not handled yet
+ JAL(dvmAllocArrayByClass) # v0 <- call(arClass, length, flags)
+ # null return?
+ beqz v0, common_exceptionThrown # alloc failed, handle exception
+
+ FETCH(a1, 2) # a1 <- FEDC or CCCC
+ sw v0, offThread_retval(rSELF) # retval.l <- new array
+ sw rINST, (offThread_retval+4)(rSELF) # retval.h <- type
+ addu a0, v0, offArrayObject_contents # a0 <- newArray->contents
+ subu rBIX, rBIX, 1 # length--, check for neg
+ FETCH_ADVANCE_INST(3) # advance to next instr, load rINST
+ bltz rBIX, 2f # was zero, bail
+
+ # copy values from registers into the array
+ # a0=array, a1=CCCC/FEDC, t0=length (from AA or B), rOBJ=AA/BA
+ move t0, rBIX
+ .if $isrange
+ EAS2(a2, rFP, a1) # a2 <- &fp[CCCC]
+1:
+ lw a3, 0(a2) # a3 <- *a2++
+ addu a2, 4
+ subu t0, t0, 1 # count--
+ sw a3, (a0) # *contents++ = vX
+ addu a0, 4
+ bgez t0, 1b
+
+ # continue at 2
+ .else
+ slt t1, t0, 4 # length was initially 5?
+ and a2, rOBJ, 15 # a2 <- A
+ bnez t1, 1f # <= 4 args, branch
+ GET_VREG(a3, a2) # a3 <- vA
+ subu t0, t0, 1 # count--
+ sw a3, 16(a0) # contents[4] = vA
+1:
+ and a2, a1, 15 # a2 <- F/E/D/C
+ GET_VREG(a3, a2) # a3 <- vF/vE/vD/vC
+ srl a1, a1, 4 # a1 <- next reg in low 4
+ subu t0, t0, 1 # count--
+ sw a3, 0(a0) # *contents++ = vX
+ addu a0, a0, 4
+ bgez t0, 1b
+ # continue at 2
+ .endif
+
+2:
+ lw a0, offThread_retval(rSELF) # a0 <- object
+ lw a1, (offThread_retval+4)(rSELF) # a1 <- type
+ seq t1, a1, 'I' # Is int array?
+ bnez t1, 3f
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ srl t3, a0, GC_CARD_SHIFT
+ addu t2, a2, t3
+ sb a2, (t2)
+3:
+ GET_INST_OPCODE(t0) # ip <- opcode from rINST
+ GOTO_OPCODE(t0) # execute it
+
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.L${opcode}_notimpl:
+ la a0, .LstrFilledNewArrayNotImpl
+ JAL(dvmThrowInternalError)
+ b common_exceptionThrown
+
+ /*
+ * Ideally we'd only define this once, but depending on layout we can
+ * exceed the range of the load above.
+ */
diff --git a/vm/mterp/mips/OP_FILLED_NEW_ARRAY_JUMBO.S b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_JUMBO.S
new file mode 100644
index 000000000..a546db2c2
--- /dev/null
+++ b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,95 @@
+%default { "isrange":"0" }
+%verify "executed"
+%verify "unimplemented array type"
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * TODO: convert most of this into a common subroutine, shared with
+ * OP_FILLED_NEW_ARRAY.S.
+ */
+ /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+    FETCH(a0, 1)                           #  a0 <- aaaa (lo)
+    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+ GET_OPA(rOBJ) # rOBJ <- AA or BA
+ EXPORT_PC() # need for resolve and alloc
+ # already resolved?
+ bnez a0, .L${opcode}_continue # yes, continue on
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .L${opcode}_continue
+%break
+
+ /*
+ * On entry:
+ * a0 holds array class
+ * rOBJ holds AA or BA
+ */
+.L${opcode}_continue:
+ LOAD_base_offClassObject_descriptor(a3, a0) # a3 <- arrayClass->descriptor
+ li a2, ALLOC_DONT_TRACK # a2 <- alloc flags
+ lbu rINST, 1(a3) # rINST <- descriptor[1]
+ FETCH(a1, 3) # a1<- BBBB (length)
+ seq t0, rINST, 'I' # array of ints?
+ seq t1, rINST, 'L' # array of objects?
+ or t0, t1
+ seq t1, rINST, '[' # array of arrays?
+ or t0, t1
+ move rBIX, a1 # save length in rBIX
+ beqz t0, .L${opcode}_notimpl # no, not handled yet
+ JAL(dvmAllocArrayByClass) # v0 <- call(arClass, length, flags)
+ # null return?
+ beqz v0, common_exceptionThrown # alloc failed, handle exception
+
+    FETCH(a1, 4)                           #  a1 <- CCCC
+ sw v0, offThread_retval(rSELF) # retval.l <- new array
+ sw rINST, (offThread_retval+4)(rSELF) # retval.h <- type
+ addu a0, v0, offArrayObject_contents # a0 <- newArray->contents
+ subu rBIX, rBIX, 1 # length--, check for neg
+ FETCH_ADVANCE_INST(5) # advance to next instr, load rINST
+ bltz rBIX, 2f # was zero, bail
+
+ # copy values from registers into the array
+ # a0=array, a1=CCCC, t0=BBBB(length)
+ move t0, rBIX
+ EAS2(a2, rFP, a1) # a2 <- &fp[CCCC]
+1:
+ lw a3, 0(a2) # a3 <- *a2++
+ addu a2, 4
+ subu t0, t0, 1 # count--
+ sw a3, (a0) # *contents++ = vX
+ addu a0, 4
+ bgez t0, 1b
+
+2:
+ lw a0, offThread_retval(rSELF) # a0 <- object
+ lw a1, (offThread_retval+4)(rSELF) # a1 <- type
+ seq t1, a1, 'I' # Is int array?
+ bnez t1, 3f
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ srl t3, a0, GC_CARD_SHIFT
+ addu t2, a2, t3
+ sb a2, (t2)
+3:
+ GET_INST_OPCODE(t0) # ip <- opcode from rINST
+ GOTO_OPCODE(t0) # execute it
+
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.L${opcode}_notimpl:
+ la a0, .LstrFilledNewArrayNotImpl
+ JAL(dvmThrowInternalError)
+ b common_exceptionThrown
diff --git a/vm/mterp/mips/OP_FILLED_NEW_ARRAY_RANGE.S b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_RANGE.S
new file mode 100644
index 000000000..9611796c0
--- /dev/null
+++ b/vm/mterp/mips/OP_FILLED_NEW_ARRAY_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_FILLED_NEW_ARRAY.S" { "isrange":"1" }
diff --git a/vm/mterp/mips/OP_FILL_ARRAY_DATA.S b/vm/mterp/mips/OP_FILL_ARRAY_DATA.S
new file mode 100644
index 000000000..7a97799d1
--- /dev/null
+++ b/vm/mterp/mips/OP_FILL_ARRAY_DATA.S
@@ -0,0 +1,16 @@
+%verify "executed"
+ /* fill-array-data vAA, +BBBBBBBB */
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll a1, a1, 16 # a1 <- BBBBbbbb
+ or a1, a0, a1 # a1 <- BBBBbbbb
+ GET_VREG(a0, a3) # a0 <- vAA (array object)
+ EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
+ EXPORT_PC()
+ JAL(dvmInterpHandleFillArrayData) # fill the array with predefined data
+ # 0 means an exception is thrown
+ beqz v0, common_exceptionThrown # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_FLOAT_TO_DOUBLE.S b/vm/mterp/mips/OP_FLOAT_TO_DOUBLE.S
new file mode 100644
index 000000000..1e2120d10
--- /dev/null
+++ b/vm/mterp/mips/OP_FLOAT_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflopWider.S" {"instr":"JAL(__extendsfdf2)", "instr_f":"cvt.d.s fv0, fa0"}
diff --git a/vm/mterp/mips/OP_FLOAT_TO_INT.S b/vm/mterp/mips/OP_FLOAT_TO_INT.S
new file mode 100644
index 000000000..166d6856d
--- /dev/null
+++ b/vm/mterp/mips/OP_FLOAT_TO_INT.S
@@ -0,0 +1,63 @@
+%verify "executed"
+%include "mips/unflop.S" {"instr":"b f2i_doconv", "instr_f":"b f2i_doconv"}
+%break
+
+/*
+ * Not an entry point as it is used only once !!
+ */
+f2i_doconv:
+#ifdef SOFT_FLOAT
+ li a1, 0x4f000000 # (float)maxint
+ move rBIX, a0
+ JAL(__gesf2) # is arg >= maxint?
+ move t0, v0
+ li v0, ~0x80000000 # return maxint (7fffffff)
+ bgez t0, .L${opcode}_set_vreg
+
+ move a0, rBIX # recover arg
+ li a1, 0xcf000000 # (float)minint
+ JAL(__lesf2)
+
+ move t0, v0
+ li v0, 0x80000000 # return minint (80000000)
+ blez t0, .L${opcode}_set_vreg
+ move a0, rBIX
+ move a1, rBIX
+ JAL(__nesf2)
+
+ move t0, v0
+ li v0, 0 # return zero for NaN
+ bnez t0, .L${opcode}_set_vreg
+
+ move a0, rBIX
+ JAL(__fixsfsi)
+ b .L${opcode}_set_vreg
+#else
+ l.s fa1, .LFLOAT_TO_INT_max
+ c.ole.s fcc0, fa1, fa0
+ l.s fv0, .LFLOAT_TO_INT_ret_max
+ bc1t .L${opcode}_set_vreg_f
+
+ l.s fa1, .LFLOAT_TO_INT_min
+ c.ole.s fcc0, fa0, fa1
+ l.s fv0, .LFLOAT_TO_INT_ret_min
+ bc1t .L${opcode}_set_vreg_f
+
+ mov.s fa1, fa0
+ c.un.s fcc0, fa0, fa1
+ li.s fv0, 0
+ bc1t .L${opcode}_set_vreg_f
+
+ trunc.w.s fv0, fa0
+ b .L${opcode}_set_vreg_f
+#endif
+
+.LFLOAT_TO_INT_max:
+ .word 0x4f000000
+.LFLOAT_TO_INT_min:
+ .word 0xcf000000
+.LFLOAT_TO_INT_ret_max:
+ .word 0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+ .word 0x80000000
+
diff --git a/vm/mterp/mips/OP_FLOAT_TO_LONG.S b/vm/mterp/mips/OP_FLOAT_TO_LONG.S
new file mode 100644
index 000000000..3e7602781
--- /dev/null
+++ b/vm/mterp/mips/OP_FLOAT_TO_LONG.S
@@ -0,0 +1,65 @@
+%verify "executed"
+%include "mips/unflopWider.S" {"instr":"b f2l_doconv", "instr_f":"b f2l_doconv", "st_result":"STORE64(rRESULT0, rRESULT1, rOBJ)"}
+%break
+
+f2l_doconv:
+#ifdef SOFT_FLOAT
+ li a1, 0x5f000000
+ move rBIX, a0
+ JAL(__gesf2)
+
+ move t0, v0
+ li rRESULT0, ~0
+ li rRESULT1, ~0x80000000
+ bgez t0, .L${opcode}_set_vreg
+
+ move a0, rBIX
+ li a1, 0xdf000000
+ JAL(__lesf2)
+
+ move t0, v0
+ li rRESULT0, 0
+ li rRESULT1, 0x80000000
+ blez t0, .L${opcode}_set_vreg
+
+ move a0, rBIX
+ move a1, rBIX
+ JAL(__nesf2)
+
+ move t0, v0
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bnez t0, .L${opcode}_set_vreg
+
+ move a0, rBIX
+ JAL(__fixsfdi)
+
+#else
+ l.s fa1, .LLONG_TO_max
+ c.ole.s fcc0, fa1, fa0
+ li rRESULT0, ~0
+ li rRESULT1, ~0x80000000
+ bc1t .L${opcode}_set_vreg
+
+ l.s fa1, .LLONG_TO_min
+ c.ole.s fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0x80000000
+ bc1t .L${opcode}_set_vreg
+
+ mov.s fa1, fa0
+ c.un.s fcc0, fa0, fa1
+ li rRESULT0, 0
+ li rRESULT1, 0
+ bc1t .L${opcode}_set_vreg
+
+ JAL(__fixsfdi)
+#endif
+
+ b .L${opcode}_set_vreg
+
+.LLONG_TO_max:
+ .word 0x5f000000
+
+.LLONG_TO_min:
+ .word 0xdf000000
diff --git a/vm/mterp/mips/OP_GOTO.S b/vm/mterp/mips/OP_GOTO.S
new file mode 100644
index 000000000..27c20e357
--- /dev/null
+++ b/vm/mterp/mips/OP_GOTO.S
@@ -0,0 +1,23 @@
+%verify "executed"
+%verify "forward and backward"
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ sll a0, rINST, 16 # a0 <- AAxx0000
+ sra a1, a0, 24 # a1 <- ssssssAA (sign-extended)
+ addu a2, a1, a1 # a2 <- byte offset
+ /* If backwards branch refresh rBASE */
+ bgez a1, 1f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bltz a1, common_testUpdateProfile # (a0) check for trace hotness
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_GOTO_16.S b/vm/mterp/mips/OP_GOTO_16.S
new file mode 100644
index 000000000..22c29da33
--- /dev/null
+++ b/vm/mterp/mips/OP_GOTO_16.S
@@ -0,0 +1,21 @@
+%verify "executed"
+%verify "forward and backward"
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S(a0, 1) # a0 <- ssssAAAA (sign-extended)
+ addu a1, a0, a0 # a1 <- byte offset, flags set
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 1f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bltz a1, common_testUpdateProfile # (a0) hot trace head?
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_GOTO_32.S b/vm/mterp/mips/OP_GOTO_32.S
new file mode 100644
index 000000000..84598c222
--- /dev/null
+++ b/vm/mterp/mips/OP_GOTO_32.S
@@ -0,0 +1,32 @@
+%verify "executed"
+%verify "forward, backward, self"
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ sll a1, a1, 16
+ or a0, a0, a1 # a0 <- AAAAaaaa
+ addu a1, a0, a0 # a1 <- byte offset
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgtz a1, 1f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ blez a1, common_testUpdateProfile # (a0) hot trace head?
+#else
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a0, 2f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+2:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_IF_EQ.S b/vm/mterp/mips/OP_IF_EQ.S
new file mode 100644
index 000000000..183ec1b4c
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_EQ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"ne" }
diff --git a/vm/mterp/mips/OP_IF_EQZ.S b/vm/mterp/mips/OP_IF_EQZ.S
new file mode 100644
index 000000000..5587291cc
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_EQZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"ne" }
diff --git a/vm/mterp/mips/OP_IF_GE.S b/vm/mterp/mips/OP_IF_GE.S
new file mode 100644
index 000000000..19bc86f5c
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"lt" }
diff --git a/vm/mterp/mips/OP_IF_GEZ.S b/vm/mterp/mips/OP_IF_GEZ.S
new file mode 100644
index 000000000..5d4fa0f37
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"lt" }
diff --git a/vm/mterp/mips/OP_IF_GT.S b/vm/mterp/mips/OP_IF_GT.S
new file mode 100644
index 000000000..8335bd3e7
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"le" }
diff --git a/vm/mterp/mips/OP_IF_GTZ.S b/vm/mterp/mips/OP_IF_GTZ.S
new file mode 100644
index 000000000..3c70c3549
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_GTZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"le" }
diff --git a/vm/mterp/mips/OP_IF_LE.S b/vm/mterp/mips/OP_IF_LE.S
new file mode 100644
index 000000000..c1524f95e
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"gt" }
diff --git a/vm/mterp/mips/OP_IF_LEZ.S b/vm/mterp/mips/OP_IF_LEZ.S
new file mode 100644
index 000000000..fa930aae0
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"gt" }
diff --git a/vm/mterp/mips/OP_IF_LT.S b/vm/mterp/mips/OP_IF_LT.S
new file mode 100644
index 000000000..fbda8bc9b
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"ge" }
diff --git a/vm/mterp/mips/OP_IF_LTZ.S b/vm/mterp/mips/OP_IF_LTZ.S
new file mode 100644
index 000000000..e93dd622a
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_LTZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"ge" }
diff --git a/vm/mterp/mips/OP_IF_NE.S b/vm/mterp/mips/OP_IF_NE.S
new file mode 100644
index 000000000..c484ede92
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_NE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/bincmp.S" { "revcmp":"eq" }
diff --git a/vm/mterp/mips/OP_IF_NEZ.S b/vm/mterp/mips/OP_IF_NEZ.S
new file mode 100644
index 000000000..24cbb6b1b
--- /dev/null
+++ b/vm/mterp/mips/OP_IF_NEZ.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/zcmp.S" { "revcmp":"eq" }
diff --git a/vm/mterp/mips/OP_IGET.S b/vm/mterp/mips/OP_IGET.S
new file mode 100644
index 000000000..ba4fada1d
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET.S
@@ -0,0 +1,49 @@
+%default { "load":"lw", "barrier":" # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * General 32-bit instance field get.
+ *
+ * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+ */
+ # op vA, vB, field /* CCCC */
+ GET_OPB(a0) # a0 <- B
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref CCCC
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ # test results
+ move a0, v0
+ bnez v0, .L${opcode}_finish
+ b common_exceptionThrown
+%break
+
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.L${opcode}_finish:
+ #BAL(common_squeak${sqnum})
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ $load a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ $barrier # acquiring load
+ GET_OPA4(a2) # a2 <- A+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(a0, a2) # fp[A] <- a0
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_BOOLEAN.S b/vm/mterp/mips/OP_IGET_BOOLEAN.S
new file mode 100644
index 000000000..4f32dbf68
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_IGET_BOOLEAN_JUMBO.S
new file mode 100644
index 000000000..1bb623323
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_BYTE.S b/vm/mterp/mips/OP_IGET_BYTE.S
new file mode 100644
index 000000000..f699e87fe
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BYTE.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_BYTE_JUMBO.S b/vm/mterp/mips/OP_IGET_BYTE_JUMBO.S
new file mode 100644
index 000000000..a59ee9272
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_BYTE_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_CHAR.S b/vm/mterp/mips/OP_IGET_CHAR.S
new file mode 100644
index 000000000..cb3a03b4f
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_CHAR.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "large values are not sign-extended"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_CHAR_JUMBO.S b/vm/mterp/mips/OP_IGET_CHAR_JUMBO.S
new file mode 100644
index 000000000..408daca70
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_CHAR_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "large values are not sign-extended"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_JUMBO.S b/vm/mterp/mips/OP_IGET_JUMBO.S
new file mode 100644
index 000000000..49920b91f
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_JUMBO.S
@@ -0,0 +1,55 @@
+%default { "load":"lw", "barrier":" # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .L${opcode}_resolved # resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.L${opcode}_finish:
+ #BAL(common_squeak${sqnum})
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ $load a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ $barrier # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_OBJECT.S b/vm/mterp/mips/OP_IGET_OBJECT.S
new file mode 100644
index 000000000..4f32dbf68
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_JUMBO.S b/vm/mterp/mips/OP_IGET_OBJECT_JUMBO.S
new file mode 100644
index 000000000..1bb623323
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_QUICK.S b/vm/mterp/mips/OP_IGET_OBJECT_QUICK.S
new file mode 100644
index 000000000..e4f7d0047
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_QUICK.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_QUICK.S"
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE.S
new file mode 100644
index 000000000..30c67742e
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..00bab9268
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_QUICK.S b/vm/mterp/mips/OP_IGET_QUICK.S
new file mode 100644
index 000000000..449079693
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+ /* For: iget-quick, iget-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 #
+ lw a0, 0(t0) # a0 <- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(a0, a2) # fp[A] <- a0
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_SHORT.S b/vm/mterp/mips/OP_IGET_SHORT.S
new file mode 100644
index 000000000..f699e87fe
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_SHORT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET.S"
diff --git a/vm/mterp/mips/OP_IGET_SHORT_JUMBO.S b/vm/mterp/mips/OP_IGET_SHORT_JUMBO.S
new file mode 100644
index 000000000..a59ee9272
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_SHORT_JUMBO.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%verify "negative value is sign-extended"
+%include "mips/OP_IGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IGET_VOLATILE.S b/vm/mterp/mips/OP_IGET_VOLATILE.S
new file mode 100644
index 000000000..30c67742e
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IGET_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..ed067375e
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IGET_WIDE.S b/vm/mterp/mips/OP_IGET_WIDE.S
new file mode 100644
index 000000000..2cdf80cd0
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE.S
@@ -0,0 +1,49 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * 64-bit instance field get (iget-wide).
+ */
+ # iget-wide vA, vB, field /* CCCC */
+ GET_OPB(a0) # a0 <- B
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref CCCC
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ # test return code
+ move a0, v0
+ bnez v0, .L${opcode}_finish
+ b common_exceptionThrown
+%break
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_finish:
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ beqz rOBJ, common_errNullObject # object was null
+ GET_OPA4(a2) # a2 <- A+
+ addu rOBJ, rOBJ, a3 # form address
+ .if $volatile
+ vLOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .else
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a2) # a3 <- &fp[A]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_WIDE_JUMBO.S b/vm/mterp/mips/OP_IGET_WIDE_JUMBO.S
new file mode 100644
index 000000000..97819a7fd
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_JUMBO.S
@@ -0,0 +1,57 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 64-bit instance field get.
+ */
+ /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .L${opcode}_resolved # resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+ # test return code
+ move a0, v0
+ bnez v0, .L${opcode}_finish
+ b common_exceptionThrown
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_finish:
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ beqz rOBJ, common_errNullObject # object was null
+ # (no A+ nibble in the jumbo format; the vBBBB index is fetched below)
+ addu rOBJ, rOBJ, a3 # form address
+ .if $volatile
+ vLOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .else
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .endif
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ EAS2(a3, rFP, a2) # a3 <- &fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # fp[BBBB] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_WIDE_QUICK.S b/vm/mterp/mips/OP_IGET_WIDE_QUICK.S
new file mode 100644
index 000000000..f4d8fdb5b
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+ # iget-wide-quick vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 # t0 <- a3 + a1
+ LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IGET_WIDE_VOLATILE.S b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE.S
new file mode 100644
index 000000000..1804fb1e1
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_IGET_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..801aa846f
--- /dev/null
+++ b/vm/mterp/mips/OP_IGET_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IGET_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_INSTANCE_OF.S b/vm/mterp/mips/OP_INSTANCE_OF.S
new file mode 100644
index 000000000..f296d444c
--- /dev/null
+++ b/vm/mterp/mips/OP_INSTANCE_OF.S
@@ -0,0 +1,82 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ # instance-of vA, vB, class /* CCCC */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a3) # a0 <- vB (object)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- pDvmDex
+ # is object null?
+ beqz a0, .L${opcode}_store # null obj, not an instance, store a0
+ FETCH(a3, 1) # a3 <- CCCC
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- pDvmDex->pResClasses
+ LOAD_eas2(a1, a2, a3) # a1 <- resolved class
+ LOAD_base_offObject_clazz(a0, a0) # a0 <- obj->clazz
+ # have we resolved this before?
+ beqz a1, .L${opcode}_resolve # not resolved, do it now
+.L${opcode}_resolved: # a0=obj->clazz, a1=resolved class
+ # same class (trivial success)?
+ beq a0, a1, .L${opcode}_trivial # yes, trivial finish
+ b .L${opcode}_fullcheck # no, do full check
+
+ /*
+ * Trivial test succeeded, save and bail.
+ * rOBJ holds A
+ */
+.L${opcode}_trivial:
+ li a0, 1 # indicate success
+ # fall thru
+ /*
+ * a0 holds boolean result
+ * rOBJ holds A
+ */
+.L${opcode}_store:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ SET_VREG(a0, rOBJ) # vA <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+%break
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from CCCC
+ * rOBJ holds A
+ */
+.L${opcode}_fullcheck:
+ JAL(dvmInstanceofNonTrivial) # v0 <- boolean result
+ move a0, v0 # fall through to ${opcode}_store
+ b .L${opcode}_store
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a3 holds CCCC
+ * rOBJ holds A
+ */
+.L${opcode}_resolve:
+ EXPORT_PC() # resolve() could throw
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ move a1, a3 # a1 <- CCCC
+ li a2, 1 # a2 <- true
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- resolved ClassObject ptr
+ # got null?
+ move a1, v0 # a1 <- class resolved from CCCC
+ beqz v0, common_exceptionThrown # yes, handle exception
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, a3) # a0 <- vB (object)
+ LOAD_base_offObject_clazz(a0, a0) # a0 <- obj->clazz
+ b .L${opcode}_resolved # pick up where we left off
+
diff --git a/vm/mterp/mips/OP_INSTANCE_OF_JUMBO.S b/vm/mterp/mips/OP_INSTANCE_OF_JUMBO.S
new file mode 100644
index 000000000..c55a30c2e
--- /dev/null
+++ b/vm/mterp/mips/OP_INSTANCE_OF_JUMBO.S
@@ -0,0 +1,96 @@
+%verify "executed"
+%verify "null object"
+%verify "class cast exception thrown, with correct class name"
+%verify "class cast exception not thrown on same class"
+%verify "class cast exception not thrown on subclass"
+%verify "class not resolved"
+%verify "class already resolved"
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ *
+ * TODO: convert most of this into a common subroutine, shared with
+ * OP_INSTANCE_OF.S.
+ */
+ /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+ FETCH(a3, 4) # a3<- vCCCC
+ FETCH(rOBJ, 3) # rOBJ<- vBBBB
+ GET_VREG(a0, a3) # a0 <- vCCCC (object)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- pDvmDex
+ # is object null?
+ beqz a0, .L${opcode}_store # null obj, not an instance, store a0
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a3, 2) # a3<- AAAA (hi)
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- pDvmDex->pResClasses
+ sll a3,a3,16
+ or a3, a1, a3 # a3<- AAAAaaaa
+
+ LOAD_eas2(a1, a2, a3) # a1 <- resolved class
+ LOAD_base_offObject_clazz(a0, a0) # a0 <- obj->clazz
+ # have we resolved this before?
+ beqz a1, .L${opcode}_resolve # not resolved, do it now
+ b .L${opcode}_resolved # resolved, continue
+
+%break
+
+ /*
+ * Class resolved, determine type of check necessary. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from AAAAAAAA
+ * rOBJ holds BBBB
+ */
+
+.L${opcode}_resolved: # a0=obj->clazz, a1=resolved class
+ # same class (trivial success)?
+ beq a0, a1, .L${opcode}_trivial # yes, trivial finish
+ # fall through to ${opcode}_fullcheck
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from AAAAAAAA
+ * rOBJ holds BBBB
+ */
+.L${opcode}_fullcheck:
+ JAL(dvmInstanceofNonTrivial) # v0 <- boolean result
+ move a0, v0
+ b .L${opcode}_store # go to ${opcode}_store
+
+.L${opcode}_trivial:
+ li a0, 1 # indicate success
+ # fall thru
+ /*
+ * a0 holds boolean result
+ * rOBJ holds BBBB
+ */
+.L${opcode}_store:
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, rOBJ) # vBBBB <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a3 holds AAAAAAAA
+ * rOBJ holds BBBB
+ */
+.L${opcode}_resolve:
+ EXPORT_PC() # resolve() could throw
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ move a1, a3 # a1 <- AAAAAAAA
+ li a2, 1 # a2 <- true
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- resolved ClassObject ptr
+ # got null?
+ move a1, v0 # a1 <- class resolved from AAAAAAAA
+ beqz v0, common_exceptionThrown # yes, handle exception
+ FETCH(a3, 4) # a3<- CCCC (was FETCH(ra,4): a3 stayed stale)
+ # a1 already holds the resolved class (moved from v0 above)
+
+ GET_VREG(a0, a3) # a0 <- vCCCC (object)
+ LOAD_base_offObject_clazz(a0, a0) # a0 <- obj->clazz
+ b .L${opcode}_resolved # pick up where we left off
+
diff --git a/vm/mterp/mips/OP_INT_TO_BYTE.S b/vm/mterp/mips/OP_INT_TO_BYTE.S
new file mode 100644
index 000000000..e9edb97f2
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"preinstr":"sll a0, a0, 24", "instr":"sra a0, a0, 24"}
diff --git a/vm/mterp/mips/OP_INT_TO_CHAR.S b/vm/mterp/mips/OP_INT_TO_CHAR.S
new file mode 100644
index 000000000..5da74da44
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"preinstr":"", "instr":"and a0, 0xffff"}
diff --git a/vm/mterp/mips/OP_INT_TO_DOUBLE.S b/vm/mterp/mips/OP_INT_TO_DOUBLE.S
new file mode 100644
index 000000000..5ee4813fd
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflopWider.S" {"instr":"JAL(__floatsidf)", "instr_f":"cvt.d.w fv0, fa0"}
diff --git a/vm/mterp/mips/OP_INT_TO_FLOAT.S b/vm/mterp/mips/OP_INT_TO_FLOAT.S
new file mode 100644
index 000000000..9cf7c48a6
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflop.S" {"instr":"JAL(__floatsisf)", "instr_f":"cvt.s.w fv0, fa0"}
diff --git a/vm/mterp/mips/OP_INT_TO_LONG.S b/vm/mterp/mips/OP_INT_TO_LONG.S
new file mode 100644
index 000000000..5691ea5fd
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopWider.S" {"instr":"sra a1, a0, 31"}
diff --git a/vm/mterp/mips/OP_INT_TO_SHORT.S b/vm/mterp/mips/OP_INT_TO_SHORT.S
new file mode 100644
index 000000000..d1fc34922
--- /dev/null
+++ b/vm/mterp/mips/OP_INT_TO_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"preinstr":"sll a0, 16", "instr":"sra a0, 16"}
diff --git a/vm/mterp/mips/OP_INVOKE_DIRECT.S b/vm/mterp/mips/OP_INVOKE_DIRECT.S
new file mode 100644
index 000000000..9bbf334e3
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_DIRECT.S
@@ -0,0 +1,42 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoiding loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ FETCH(rBIX, 2) # rBIX <- GFED or CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+ .if (!$isrange)
+ and rBIX, rBIX, 15 # rBIX <- D (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ # already resolved?
+ bnez a0, 1f # resolved, call the function
+
+ lw a3, offThread_method(rSELF) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_DIRECT # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+
+1:
+ bnez rOBJ, common_invokeMethod${routine} # a0=method, rOBJ="this"
+ b common_errNullObject # yes, throw exception
+
+
+
diff --git a/vm/mterp/mips/OP_INVOKE_DIRECT_JUMBO.S b/vm/mterp/mips/OP_INVOKE_DIRECT_JUMBO.S
new file mode 100644
index 000000000..afe70b7a1
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_DIRECT_JUMBO.S
@@ -0,0 +1,43 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoiding loading the first arg twice.)
+ *
+ */
+ /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ FETCH(rBIX, 4) # rBIX <- CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+ # jumbo always uses a register range: "this" is the full 16-bit vCCCC
+ # index, so the 4-bit "and rBIX, 15" mask copied from OP_INVOKE_DIRECT
+ # must not be applied (it read the wrong register whenever CCCC > 15)
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ # already resolved?
+ bnez a0, 1f # resolved, call the function
+
+ lw a3, offThread_method(rSELF) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_DIRECT # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+
+1:
+ bnez rOBJ, common_invokeMethodJumbo # a0=method, rOBJ="this"
+ b common_errNullObject # yes, throw exception
+
+
+
diff --git a/vm/mterp/mips/OP_INVOKE_DIRECT_RANGE.S b/vm/mterp/mips/OP_INVOKE_DIRECT_RANGE.S
new file mode 100644
index 000000000..ef8801189
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_DIRECT_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_DIRECT.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_INTERFACE.S b/vm/mterp/mips/OP_INVOKE_INTERFACE.S
new file mode 100644
index 000000000..092409338
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_INTERFACE.S
@@ -0,0 +1,28 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(a2, 2) # a2 <- FEDC or CCCC
+ FETCH(a1, 1) # a1 <- BBBB
+ .if (!$isrange)
+ and a2, 15 # a2 <- C (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, a2) # rOBJ <- first arg ("this")
+ LOAD_rSELF_methodClassDex(a3) # a3 <- methodClassDex
+ LOAD_rSELF_method(a2) # a2 <- method
+ # null obj?
+ beqz rOBJ, common_errNullObject # yes, fail
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- thisPtr->clazz
+ JAL(dvmFindInterfaceMethodInCache) # v0 <- call(class, ref, method, dex)
+ move a0, v0
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b common_invokeMethod${routine} # (a0=method, rOBJ="this")
diff --git a/vm/mterp/mips/OP_INVOKE_INTERFACE_JUMBO.S b/vm/mterp/mips/OP_INVOKE_INTERFACE_JUMBO.S
new file mode 100644
index 000000000..b055d6955
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_INTERFACE_JUMBO.S
@@ -0,0 +1,25 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+ /*
+ * Handle an interface method call.
+ */
+ /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ FETCH(a2, 4) # a2<- CCCC
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ EXPORT_PC() # must export for invoke
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ GET_VREG(rOBJ, a2) # rOBJ <- first arg ("this")
+ LOAD_rSELF_methodClassDex(a3) # a3 <- methodClassDex
+ LOAD_rSELF_method(a2) # a2 <- method
+ # null obj?
+ beqz rOBJ, common_errNullObject # yes, fail
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- thisPtr->clazz
+ JAL(dvmFindInterfaceMethodInCache) # v0 <- call(class, ref, method, dex)
+ move a0, v0
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b common_invokeMethodJumbo # (a0=method, rOBJ="this")
diff --git a/vm/mterp/mips/OP_INVOKE_INTERFACE_RANGE.S b/vm/mterp/mips/OP_INVOKE_INTERFACE_RANGE.S
new file mode 100644
index 000000000..6257c8a5d
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_INTERFACE_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_INTERFACE.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_JUMBO.S b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_JUMBO.S
new file mode 100644
index 000000000..bd7c46dd6
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_OBJECT_INIT_RANGE.S" {"jumbo":"1", "cccc":"4"}
diff --git a/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_RANGE.S b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_RANGE.S
new file mode 100644
index 000000000..df0d6c9cd
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_OBJECT_INIT_RANGE.S
@@ -0,0 +1,48 @@
+%default { "jumbo":"0", "cccc":"2" }
+%verify "executed"
+%verify "finalizable class"
+ /*
+ * Invoke Object.<init> on an object. In practice we know that
+ * Object's nullary constructor doesn't do anything, so we just
+ * skip it unless a debugger is active.
+ */
+ FETCH(a1, ${cccc}) # a1<- CCCC
+ GET_VREG(a0, a1) # a0<- "this" ptr
+ # check for NULL
+ beqz a0, common_errNullObject # export PC and throw NPE
+ LOAD_base_offObject_clazz(a1, a0) # a1<- obj->clazz
+ LOAD_base_offClassObject_accessFlags(a2, a1) # a2<- clazz->accessFlags
+ and a2, CLASS_ISFINALIZABLE # is this class finalizable?
+ beqz a2, .L${opcode}_finish # no, go
+
+.L${opcode}_setFinal:
+ EXPORT_PC() # can throw
+ JAL(dvmSetFinalizable) # call dvmSetFinalizable(obj)
+ LOAD_offThread_exception(a0, rSELF) # a0<- self->exception
+ # exception pending?
+ bnez a0, common_exceptionThrown # yes, handle it
+
+.L${opcode}_finish:
+ lhu a1, offThread_subMode(rSELF)
+ and a1, kSubModeDebuggerActive # debugger active?
+ bnez a1, .L${opcode}_debugger # Yes - skip optimization
+ FETCH_ADVANCE_INST(${cccc}+1) # advance to next instr, load rINST
+ GET_INST_OPCODE(t0) # t0<- opcode from rINST
+ GOTO_OPCODE(t0) # execute it
+
+%break
+ /*
+ * A debugger is attached, so we need to go ahead and do
+ * this. For simplicity, we'll just jump directly to the
+ * corresponding handler. Note that we can't use
+ * rIBASE here because it may be in single-step mode.
+ * Load the primary table base directly.
+ */
+.L${opcode}_debugger:
+ lw a1, offThread_mainHandlerTable(rSELF)
+ .if $jumbo
+ li t0, OP_INVOKE_DIRECT_JUMBO
+ .else
+ li t0, OP_INVOKE_DIRECT_RANGE
+ .endif
+ GOTO_OPCODE_BASE(a1, t0) # execute it
diff --git a/vm/mterp/mips/OP_INVOKE_STATIC.S b/vm/mterp/mips/OP_INVOKE_STATIC.S
new file mode 100644
index 000000000..ba2d7cc31
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_STATIC.S
@@ -0,0 +1,54 @@
+%default { "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ li rOBJ, 0 # null "this" in delay slot
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX<- &resolved_methodToCall
+#endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, common_invokeMethod${routine} # yes, continue on
+ b .L${opcode}_resolve
+%break
+
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_STATIC # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we're actively building a trace. If so,
+ * we need to keep this instruction out of it.
+ * rBIX: &resolved_methodToCall
+ */
+ lhu a2, offThread_subMode(rSELF)
+ beqz v0, common_exceptionThrown # null, handle exception
+ and a2, kSubModeJitTraceBuild # trace under construction?
+ beqz a2, common_invokeMethod${routine} # no, (a0=method, rOBJ="this")
+ lw a1, 0(rBIX) # reload resolved method
+ # finished resolving?
+ bnez a1, common_invokeMethod${routine} # yes, (a0=method, rOBJ="this")
+ move rBIX, a0 # preserve method
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) # (self, pc)
+ move a0, rBIX
+ b common_invokeMethod${routine} # whew, finally!
+#else
+ # got null?
+ bnez v0, common_invokeMethod${routine} # (a0=method, rOBJ="this")
+ b common_exceptionThrown # yes, handle exception
+#endif
diff --git a/vm/mterp/mips/OP_INVOKE_STATIC_JUMBO.S b/vm/mterp/mips/OP_INVOKE_STATIC_JUMBO.S
new file mode 100644
index 000000000..80576a212
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_STATIC_JUMBO.S
@@ -0,0 +1,53 @@
+%verify "executed"
+%verify "unknown method"
+ /*
+ * Handle a static method call.
+ */
+ /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ li rOBJ, 0 # null "this" in delay slot
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX<- &resolved_methodToCall
+#endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, common_invokeMethodJumboNoThis # (a0 = method)
+ b .L${opcode}_resolve
+%break
+
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_STATIC # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we're actively building a trace. If so,
+ * we need to keep this instruction out of it.
+ * rBIX: &resolved_methodToCall
+ */
+ lhu a2, offThread_subMode(rSELF)
+ beqz v0, common_exceptionThrown # null, handle exception
+ and a2, kSubModeJitTraceBuild # trace under construction?
+ beqz a2, common_invokeMethodJumboNoThis # no, (a0=method, rOBJ="this")
+ lw a1, 0(rBIX) # reload resolved method
+ # finished resolving?
+ bnez a1, common_invokeMethodJumboNoThis # yes, (a0=method, rOBJ="this")
+ move rBIX, a0 # preserve method
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) # (self, pc)
+ move a0, rBIX
+ b common_invokeMethodJumboNoThis # whew, finally!
+#else
+ # got null?
+ bnez v0, common_invokeMethodJumboNoThis # (a0=method, rOBJ="this")
+ b common_exceptionThrown # yes, handle exception
+#endif
diff --git a/vm/mterp/mips/OP_INVOKE_STATIC_RANGE.S b/vm/mterp/mips/OP_INVOKE_STATIC_RANGE.S
new file mode 100644
index 000000000..9b45216e7
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_STATIC_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_STATIC.S" { "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER.S b/vm/mterp/mips/OP_INVOKE_SUPER.S
new file mode 100644
index 000000000..6b4438026
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER.S
@@ -0,0 +1,60 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(t0, 2) # t0 <- GFED or CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ .if (!$isrange)
+ and t0, t0, 15 # t0 <- D (or stays CCCC)
+ .endif
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ GET_VREG(rOBJ, t0) # rOBJ <- "this" ptr
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ # null "this"?
+ LOAD_rSELF_method(t1) # t1 <- current method
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ # cmp a0, 0; already resolved?
+ LOAD_base_offMethod_clazz(rBIX, t1) # rBIX <- method->clazz
+ EXPORT_PC() # must export for invoke
+ bnez a0, .L${opcode}_continue # resolved, continue on
+
+ move a0, rBIX # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .L${opcode}_continue
+%break
+
+ /*
+ * At this point:
+ * a0 = resolved base method
+ * rBIX = method->clazz
+ */
+.L${opcode}_continue:
+ LOAD_base_offClassObject_super(a1, rBIX) # a1 <- method->clazz->super
+ LOADu2_offMethod_methodIndex(a2, a0) # a2 <- baseMethod->methodIndex
+ LOAD_base_offClassObject_vtableCount(a3, a1) # a3 <- super->vtableCount
+ EXPORT_PC() # must export for invoke
+ # compare (methodIndex, vtableCount)
+ bgeu a2, a3, .L${opcode}_nsm # method not present in superclass
+ LOAD_base_offClassObject_vtable(a1, a1) # a1 <- ...clazz->super->vtable
+ LOAD_eas2(a0, a1, a2) # a0 <- vtable[methodIndex]
+ b common_invokeMethod${routine} # continue on
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * a0 = resolved base method
+ */
+.L${opcode}_nsm:
+ LOAD_base_offMethod_name(a1, a0) # a1 <- method name
+ b common_errNoSuchMethod
+
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_JUMBO.S b/vm/mterp/mips/OP_INVOKE_SUPER_JUMBO.S
new file mode 100644
index 000000000..5794cb1b8
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_JUMBO.S
@@ -0,0 +1,56 @@
+%verify "executed"
+%verify "unknown method"
+ /*
+ * Handle a "super" method call.
+ */
+ /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ FETCH(t0, 4) # t0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ GET_VREG(rOBJ, t0) # rOBJ <- "this" ptr
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ # null "this"?
+ LOAD_rSELF_method(t1) # t1 <- current method
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ # cmp a0, 0; already resolved?
+ LOAD_base_offMethod_clazz(rBIX, t1) # rBIX <- method->clazz
+ EXPORT_PC() # must export for invoke
+ bnez a0, .L${opcode}_continue # resolved, continue on
+
+ move a0, rBIX # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .L${opcode}_continue
+%break
+
+ /*
+ * At this point:
+ * a0 = resolved base method
+ * rBIX = method->clazz
+ */
+.L${opcode}_continue:
+ LOAD_base_offClassObject_super(a1, rBIX) # a1 <- method->clazz->super
+ LOADu2_offMethod_methodIndex(a2, a0) # a2 <- baseMethod->methodIndex
+ LOAD_base_offClassObject_vtableCount(a3, a1) # a3 <- super->vtableCount
+ EXPORT_PC() # must export for invoke
+ # compare (methodIndex, vtableCount)
+ bgeu a2, a3, .L${opcode}_nsm # method not present in superclass
+ LOAD_base_offClassObject_vtable(a1, a1) # a1 <- ...clazz->super->vtable
+ LOAD_eas2(a0, a1, a2) # a0 <- vtable[methodIndex]
+ b common_invokeMethodJumbo # a0=method rOBJ="this"
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * a0 = resolved base method
+ */
+.L${opcode}_nsm:
+ LOAD_base_offMethod_name(a1, a0) # a1 <- method name
+ b common_errNoSuchMethod
+
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_QUICK.S b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK.S
new file mode 100644
index 000000000..eb5465a9c
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK.S
@@ -0,0 +1,26 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(t0, 2) # t0 <- GFED or CCCC
+ LOAD_rSELF_method(a2) # a2 <- current method
+ .if (!$isrange)
+ and t0, t0, 15 # t0 <- D (or stays CCCC)
+ .endif
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offMethod_clazz(a2, a2) # a2 <- method->clazz
+ EXPORT_PC() # must export for invoke
+ LOAD_base_offClassObject_super(a2, a2) # a2 <- method->clazz->super
+ GET_VREG(rOBJ, t0) # rOBJ <- "this"
+ LOAD_base_offClassObject_vtable(a2, a2) # a2 <- ...clazz->super->vtable
+ # is "this" null ?
+ LOAD_eas2(a0, a2, a1) # a0 <- super->vtable[BBBB]
+ beqz rOBJ, common_errNullObject # "this" is null, throw exception
+ b common_invokeMethod${routine} # (a0=method, rOBJ="this")
+
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_QUICK_RANGE.S b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK_RANGE.S
new file mode 100644
index 000000000..ade7bbad1
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_QUICK_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_SUPER_QUICK.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_SUPER_RANGE.S b/vm/mterp/mips/OP_INVOKE_SUPER_RANGE.S
new file mode 100644
index 000000000..7821d31eb
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_SUPER_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_SUPER.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL.S
new file mode 100644
index 000000000..9f6d2c32b
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL.S
@@ -0,0 +1,48 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ FETCH(rBIX, 2) # rBIX <- GFED or CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ .if (!$isrange)
+ and rBIX, rBIX, 15 # rBIX <- D (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, .L${opcode}_continue # yes, continue on
+
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+ # got null?
+ bnez v0, .L${opcode}_continue # no, continue
+ b common_exceptionThrown # yes, handle exception
+%break
+
+ /*
+ * At this point:
+ * a0 = resolved base method
+ * rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.L${opcode}_continue:
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ LOADu2_offMethod_methodIndex(a2, a0) # a2 <- baseMethod->methodIndex
+ # is "this" null?
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ LOAD_base_offObject_clazz(a3, rOBJ) # a3 <- thisPtr->clazz
+ LOAD_base_offClassObject_vtable(a3, a3) # a3 <- thisPtr->clazz->vtable
+ LOAD_eas2(a0, a3, a2) # a0 <- vtable[methodIndex]
+ b common_invokeMethod${routine} # (a0=method, rOBJ="this")
+
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_JUMBO.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_JUMBO.S
new file mode 100644
index 000000000..6bcde3425
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_JUMBO.S
@@ -0,0 +1,44 @@
+%verify "executed"
+%verify "unknown method"
+%verify "null object"
+ /*
+ * Handle a virtual method call.
+ */
+ /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, .L${opcode}_continue # yes, continue on
+
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+ # got null?
+ bnez v0, .L${opcode}_continue # no, continue
+ b common_exceptionThrown # yes, handle exception
+%break
+
+ /*
+ * At this point:
+ * a0 = resolved base method
+ * rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.L${opcode}_continue:
+ FETCH(rBIX,4) # rBIX <- CCCC
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ LOADu2_offMethod_methodIndex(a2, a0) # a2 <- baseMethod->methodIndex
+ # is "this" null?
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ LOAD_base_offObject_clazz(a3, rOBJ) # a3 <- thisPtr->clazz
+ LOAD_base_offClassObject_vtable(a3, a3) # a3 <- thisPtr->clazz->vtable
+ LOAD_eas2(a0, a3, a2) # a0 <- vtable[methodIndex]
+ b common_invokeMethodJumbo # (a0=method, rOBJ="this")
+
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK.S
new file mode 100644
index 000000000..1952b70aa
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK.S
@@ -0,0 +1,23 @@
+%default { "isrange":"0", "routine":"NoRange" }
+%verify "executed"
+%verify "null object"
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(a3, 2) # a3 <- FEDC or CCCC
+ FETCH(a1, 1) # a1 <- BBBB
+ .if (!$isrange)
+ and a3, a3, 15 # a3 <- C (or stays CCCC)
+ .endif
+ GET_VREG(rOBJ, a3) # rOBJ <- vC ("this" ptr)
+ # is "this" null?
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ LOAD_base_offObject_clazz(a2, rOBJ) # a2 <- thisPtr->clazz
+ LOAD_base_offClassObject_vtable(a2, a2) # a2 <- thisPtr->clazz->vtable
+ EXPORT_PC() # invoke must export
+ LOAD_eas2(a0, a2, a1) # a0 <- vtable[BBBB]
+ b common_invokeMethod${routine} # (a0=method, rOBJ="this")
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S
new file mode 100644
index 000000000..804889530
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_VIRTUAL_QUICK.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_INVOKE_VIRTUAL_RANGE.S b/vm/mterp/mips/OP_INVOKE_VIRTUAL_RANGE.S
new file mode 100644
index 000000000..5f86b4bce
--- /dev/null
+++ b/vm/mterp/mips/OP_INVOKE_VIRTUAL_RANGE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_INVOKE_VIRTUAL.S" { "isrange":"1", "routine":"Range" }
diff --git a/vm/mterp/mips/OP_IPUT.S b/vm/mterp/mips/OP_IPUT.S
new file mode 100644
index 000000000..626cc9229
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT.S
@@ -0,0 +1,50 @@
+%default { "store":"sw","postbarrier":" # noop", "prebarrier":" # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * General 32-bit instance field put.
+ *
+ * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+ */
+ # op vA, vB, field /* CCCC */
+ GET_OPB(a0) # a0 <- B
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref CCCC
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ # success?
+ move a0, v0
+ bnez v0, .L${opcode}_finish # yes, finish up
+ b common_exceptionThrown
+%break
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_finish:
+ #BAL(common_squeak${sqnum})
+ GET_OPA4(a1) # a1 <- A+
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ GET_VREG(a0, a1) # a0 <- fp[A]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ $prebarrier # releasing store
+ $store a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ $postbarrier
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_BOOLEAN.S b/vm/mterp/mips/OP_IPUT_BOOLEAN.S
new file mode 100644
index 000000000..4f09dab97
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_IPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 000000000..8457c294e
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_BYTE.S b/vm/mterp/mips/OP_IPUT_BYTE.S
new file mode 100644
index 000000000..4f09dab97
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_BYTE_JUMBO.S b/vm/mterp/mips/OP_IPUT_BYTE_JUMBO.S
new file mode 100644
index 000000000..8457c294e
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_CHAR.S b/vm/mterp/mips/OP_IPUT_CHAR.S
new file mode 100644
index 000000000..4f09dab97
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_CHAR_JUMBO.S b/vm/mterp/mips/OP_IPUT_CHAR_JUMBO.S
new file mode 100644
index 000000000..8457c294e
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_JUMBO.S b/vm/mterp/mips/OP_IPUT_JUMBO.S
new file mode 100644
index 000000000..2d0598400
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_JUMBO.S
@@ -0,0 +1,58 @@
+%default { "store":"sw","postbarrier":"# noop ", "prebarrier":" # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 32-bit instance field put.
+ *
+ * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+ * iput-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .L${opcode}_resolved # resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to ${opcode}_finish
+
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_finish:
+ #BAL(common_squeak${sqnum})
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ $prebarrier # releasing store
+ $store a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ $postbarrier
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT.S b/vm/mterp/mips/OP_IPUT_OBJECT.S
new file mode 100644
index 000000000..0382fa8fe
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT.S
@@ -0,0 +1,56 @@
+%default { "store":"sw", "postbarrier":" # noop", "prebarrier":" # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ # op vA, vB, field /* CCCC */
+ GET_OPB(a0) # a0 <- B
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref CCCC
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ # success?
+ move a0, v0
+ bnez v0, .L${opcode}_finish # yes, finish up
+ b common_exceptionThrown
+%break
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_finish:
+ #BAL(common_squeak${sqnum})
+ GET_OPA4(a1) # a1 <- A+
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ GET_VREG(a0, a1) # a0 <- fp[A]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu t2, rOBJ, a3 # form address
+ $prebarrier # releasing store
+ $store a0, (t2) # obj.field (32 bits) <- a0
+ $postbarrier
+ beqz a0, 1f # stored a null reference?
+ srl t1, rOBJ, GC_CARD_SHIFT
+ addu t2, a2, t1
+ sb a2, (t2) # mark card if not
+1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_JUMBO.S b/vm/mterp/mips/OP_IPUT_OBJECT_JUMBO.S
new file mode 100644
index 000000000..ce82ff883
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_JUMBO.S
@@ -0,0 +1,60 @@
+%default { "store":"sw", "postbarrier":" # noop", "prebarrier":" # noop", "sqnum":"0" }
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 32-bit instance field put.
+ */
+ /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .L${opcode}_resolved
+
+%break
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to ${opcode}_finish
+
+.L${opcode}_finish:
+ #BAL(common_squeak${sqnum})
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu t2, rOBJ, a3 # form address
+ $prebarrier # releasing store
+ $store a0, (t2) # obj.field (32 bits) <- a0
+ $postbarrier
+ beqz a0, 1f # stored a null reference?
+ srl t1, rOBJ, GC_CARD_SHIFT
+ addu t2, a2, t1
+ sb a2, (t2) # mark card if not
+1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_QUICK.S b/vm/mterp/mips/OP_IPUT_OBJECT_QUICK.S
new file mode 100644
index 000000000..eb0afb454
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_QUICK.S
@@ -0,0 +1,21 @@
+%verify "executed"
+%verify "null object"
+ /* For: iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sw a0, 0(t0) # obj.field (always 32 bits) <- a0
+ beqz a0, 1f
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ srl t1, a3, GC_CARD_SHIFT
+ addu t2, a2, t1
+ sb a2, 0(t2)
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE.S
new file mode 100644
index 000000000..8320a7d8f
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_OBJECT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..48cdb6c19
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_OBJECT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IPUT_QUICK.S b/vm/mterp/mips/OP_IPUT_QUICK.S
new file mode 100644
index 000000000..8976265b9
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_QUICK.S
@@ -0,0 +1,16 @@
+%verify "executed"
+%verify "null object"
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sw a0, 0(t0) # obj.field (always 32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_SHORT.S b/vm/mterp/mips/OP_IPUT_SHORT.S
new file mode 100644
index 000000000..4f09dab97
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S"
diff --git a/vm/mterp/mips/OP_IPUT_SHORT_JUMBO.S b/vm/mterp/mips/OP_IPUT_SHORT_JUMBO.S
new file mode 100644
index 000000000..8457c294e
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_IPUT_VOLATILE.S b/vm/mterp/mips/OP_IPUT_VOLATILE.S
new file mode 100644
index 000000000..4cb365f5e
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IPUT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IPUT_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..aaf70b78f
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_IPUT_WIDE.S b/vm/mterp/mips/OP_IPUT_WIDE.S
new file mode 100644
index 000000000..b8d969097
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE.S
@@ -0,0 +1,48 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ # iput-wide vA, vB, field /* CCCC */
+ GET_OPB(a0) # a0 <- B
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref CCCC
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ # success?
+ move a0, v0
+ bnez v0, .L${opcode}_finish # yes, finish up
+ b common_exceptionThrown
+%break
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_finish:
+ GET_OPA4(a2) # a2 <- A+
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ EAS2(a2, rFP, a2) # a2 <- &fp[A]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a0, a1, a2) # a0/a1 <- fp[A]
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ addu a2, rOBJ, a3 # form address
+ .if $volatile
+ JAL(dvmQuasiAtomicSwap64Sync) # stores r0/r1 into addr r2
+# STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .else
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_JUMBO.S b/vm/mterp/mips/OP_IPUT_WIDE_JUMBO.S
new file mode 100644
index 000000000..8edc14292
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_JUMBO.S
@@ -0,0 +1,55 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "null object"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .L${opcode}_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .L${opcode}_resolved # resolved, continue
+
+%break
+
+.L${opcode}_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to ${opcode}_finish
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.L${opcode}_finish:
+ FETCH(a2, 3) # a2 <- BBBB
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ EAS2(a2, rFP, a2) # a2 <- &fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ LOAD64(a0, a1, a2) # a0/a1 <- fp[BBBB]
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ addu a2, rOBJ, a3 # form address
+ .if $volatile
+ JAL(dvmQuasiAtomicSwap64Sync) # stores r0/r1 into addr r2
+# STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .else
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_QUICK.S b/vm/mterp/mips/OP_IPUT_WIDE_QUICK.S
new file mode 100644
index 000000000..f86c403e3
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_QUICK.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "null object"
+ # iput-wide-quick vA, vB, offset /* CCCC */
+ GET_OPA4(a0) # a0 <- A(+)
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
+ EAS2(a3, rFP, a0) # a3 <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
+ # check object for null
+ beqz a2, common_errNullObject # object was null
+ FETCH(a3, 1) # a3 <- field byte offset
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu a2, a2, a3 # a2 <- address of obj.field
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE.S b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE.S
new file mode 100644
index 000000000..784be66ae
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..72436fa24
--- /dev/null
+++ b/vm/mterp/mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_IPUT_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_LONG_TO_DOUBLE.S b/vm/mterp/mips/OP_LONG_TO_DOUBLE.S
new file mode 100644
index 000000000..fad9ec0fe
--- /dev/null
+++ b/vm/mterp/mips/OP_LONG_TO_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unflopWide.S" {"instr":"JAL(__floatdidf)", "ld_arg":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/vm/mterp/mips/OP_LONG_TO_FLOAT.S b/vm/mterp/mips/OP_LONG_TO_FLOAT.S
new file mode 100644
index 000000000..86a143a01
--- /dev/null
+++ b/vm/mterp/mips/OP_LONG_TO_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopNarrower.S" {"instr":"JAL(__floatdisf)", "instr_f":"JAL(__floatdisf)", "load":"LOAD64(rARG0, rARG1, a3)"}
diff --git a/vm/mterp/mips/OP_LONG_TO_INT.S b/vm/mterp/mips/OP_LONG_TO_INT.S
new file mode 100644
index 000000000..fe8f865ab
--- /dev/null
+++ b/vm/mterp/mips/OP_LONG_TO_INT.S
@@ -0,0 +1,10 @@
+%verify "executed"
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifdef HAVE_BIG_ENDIAN
+ addu a1, a1, 1
+#endif
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
diff --git a/vm/mterp/mips/OP_MONITOR_ENTER.S b/vm/mterp/mips/OP_MONITOR_ENTER.S
new file mode 100644
index 000000000..1f5541eb8
--- /dev/null
+++ b/vm/mterp/mips/OP_MONITOR_ENTER.S
@@ -0,0 +1,17 @@
+%verify "executed"
+%verify "exception for null object"
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a1, a2) # a1 <- vAA (object)
+ move a0, rSELF # a0 <- self
+ EXPORT_PC() # export PC so we can grab stack trace
+ # null object?
+ beqz a1, common_errNullObject # null object, throw an exception
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(dvmLockObject) # call(self, obj)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MONITOR_EXIT.S b/vm/mterp/mips/OP_MONITOR_EXIT.S
new file mode 100644
index 000000000..fc671cb51
--- /dev/null
+++ b/vm/mterp/mips/OP_MONITOR_EXIT.S
@@ -0,0 +1,26 @@
+%verify "executed"
+%verify "exception for null object (impossible in javac)"
+%verify "dvmUnlockObject fails"
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ GET_OPA(a2) # a2 <- AA
+ EXPORT_PC() # before fetch: export the PC
+ GET_VREG(a1, a2) # a1 <- vAA (object)
+ # null object?
+ beqz a1, 1f
+ move a0, rSELF # a0 <- self
+ JAL(dvmUnlockObject) # v0 <- success for unlock(self, obj)
+ # failed?
+ FETCH_ADVANCE_INST(1) # before throw: advance rPC, load rINST
+ beqz v0, common_exceptionThrown # yes, exception is pending
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+1:
+ FETCH_ADVANCE_INST(1) # before throw: advance rPC, load rINST
+ b common_errNullObject
diff --git a/vm/mterp/mips/OP_MOVE.S b/vm/mterp/mips/OP_MOVE.S
new file mode 100644
index 000000000..dbf7ea451
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE.S
@@ -0,0 +1,10 @@
+%verify "executed"
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
+
diff --git a/vm/mterp/mips/OP_MOVE_16.S b/vm/mterp/mips/OP_MOVE_16.S
new file mode 100644
index 000000000..8410b9399
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_16.S
@@ -0,0 +1,10 @@
+%verify "executed"
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(a1, 2) # a1 <- BBBB
+ FETCH(a0, 1) # a0 <- AAAA
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2 and jump
+
diff --git a/vm/mterp/mips/OP_MOVE_EXCEPTION.S b/vm/mterp/mips/OP_MOVE_EXCEPTION.S
new file mode 100644
index 000000000..1040155c4
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_EXCEPTION.S
@@ -0,0 +1,11 @@
+%verify "executed"
+ /* move-exception vAA */
+ GET_OPA(a2) # a2 <- AA
+ LOAD_offThread_exception(a3, rSELF) # a3 <- dvmGetException bypass
+ li a1, 0 # a1 <- 0
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG(a3, a2) # fp[AA] <- exception obj
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE_offThread_exception(a1, rSELF) # dvmClearException bypass
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MOVE_FROM16.S b/vm/mterp/mips/OP_MOVE_FROM16.S
new file mode 100644
index 000000000..d01814091
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_FROM16.S
@@ -0,0 +1,10 @@
+%verify "executed"
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
+
diff --git a/vm/mterp/mips/OP_MOVE_OBJECT.S b/vm/mterp/mips/OP_MOVE_OBJECT.S
new file mode 100644
index 000000000..7150ed506
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE.S"
diff --git a/vm/mterp/mips/OP_MOVE_OBJECT_16.S b/vm/mterp/mips/OP_MOVE_OBJECT_16.S
new file mode 100644
index 000000000..c3dfae034
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_OBJECT_16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE_16.S"
diff --git a/vm/mterp/mips/OP_MOVE_OBJECT_FROM16.S b/vm/mterp/mips/OP_MOVE_OBJECT_FROM16.S
new file mode 100644
index 000000000..1ec1ae973
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_OBJECT_FROM16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE_FROM16.S"
diff --git a/vm/mterp/mips/OP_MOVE_RESULT.S b/vm/mterp/mips/OP_MOVE_RESULT.S
new file mode 100644
index 000000000..05f40fa45
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_RESULT.S
@@ -0,0 +1,9 @@
+%verify "executed"
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_rSELF_retval(a0) # a0 <- self->retval.i
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
+
diff --git a/vm/mterp/mips/OP_MOVE_RESULT_OBJECT.S b/vm/mterp/mips/OP_MOVE_RESULT_OBJECT.S
new file mode 100644
index 000000000..74aa09173
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_RESULT_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_MOVE_RESULT.S"
diff --git a/vm/mterp/mips/OP_MOVE_RESULT_WIDE.S b/vm/mterp/mips/OP_MOVE_RESULT_WIDE.S
new file mode 100644
index 000000000..8a548d1fc
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_RESULT_WIDE.S
@@ -0,0 +1,11 @@
+%verify "executed"
+ /* move-result-wide vAA */
+ GET_OPA(a2) # a2 <- AA
+ addu a3, rSELF, offThread_retval # a3 <- &self->retval
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ LOAD64(a0, a1, a3) # a0/a1 <- retval.j
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[AA] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MOVE_WIDE.S b/vm/mterp/mips/OP_MOVE_WIDE.S
new file mode 100644
index 000000000..747006125
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_WIDE.S
@@ -0,0 +1,13 @@
+%verify "executed"
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ GET_OPA4(a2) # a2 <- A(+)
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(a2, rFP, a2) # a2 <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MOVE_WIDE_16.S b/vm/mterp/mips/OP_MOVE_WIDE_16.S
new file mode 100644
index 000000000..bdd9f2690
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_WIDE_16.S
@@ -0,0 +1,13 @@
+%verify "executed"
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 2) # a3 <- BBBB
+ FETCH(a2, 1) # a2 <- AAAA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ EAS2(a2, rFP, a2) # a2 <- &fp[AAAA]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[AAAA] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MOVE_WIDE_FROM16.S b/vm/mterp/mips/OP_MOVE_WIDE_FROM16.S
new file mode 100644
index 000000000..44251f451
--- /dev/null
+++ b/vm/mterp/mips/OP_MOVE_WIDE_FROM16.S
@@ -0,0 +1,13 @@
+%verify "executed"
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 1) # a3 <- BBBB
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[AA] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MUL_DOUBLE.S b/vm/mterp/mips/OP_MUL_DOUBLE.S
new file mode 100644
index 000000000..565ca57fb
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__muldf3)", "instr_f":"mul.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_MUL_DOUBLE_2ADDR.S
new file mode 100644
index 000000000..8d1dac1f6
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__muldf3)", "instr_f":"mul.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_FLOAT.S b/vm/mterp/mips/OP_MUL_FLOAT.S
new file mode 100644
index 000000000..af9bb3b18
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__mulsf3)", "instr_f":"mul.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_FLOAT_2ADDR.S b/vm/mterp/mips/OP_MUL_FLOAT_2ADDR.S
new file mode 100644
index 000000000..726e8a492
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__mulsf3)", "instr_f":"mul.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_MUL_INT.S b/vm/mterp/mips/OP_MUL_INT.S
new file mode 100644
index 000000000..d9d6d2acd
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_INT_2ADDR.S b/vm/mterp/mips/OP_MUL_INT_2ADDR.S
new file mode 100644
index 000000000..bbf4d77a4
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_INT_LIT16.S b/vm/mterp/mips/OP_MUL_INT_LIT16.S
new file mode 100644
index 000000000..654e76dd0
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_INT_LIT8.S b/vm/mterp/mips/OP_MUL_INT_LIT8.S
new file mode 100644
index 000000000..c0278ae1b
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"mul a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_MUL_LONG.S b/vm/mterp/mips/OP_MUL_LONG.S
new file mode 100644
index 000000000..c16a2307e
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_LONG.S
@@ -0,0 +1,41 @@
+%verify "executed"
+ /*
+ * Signed 64-bit integer multiply.
+ * a1 a0
+ * x a3 a2
+ * -------------
+ * a2a1 a2a0
+ * a3a0
+ * a3a1 (<= unused)
+ * ---------------
+ * v1 v0
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ and t0, a0, 255 # t0 <- BB
+ srl t1, a0, 8 # t1 <- CC
+ EAS2(t0, rFP, t0) # t0 <- &fp[BB]
+ LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
+
+ EAS2(t1, rFP, t1) # t1 <- &fp[CC]
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+
+ mul v1, a3, a0 # v1= a3a0
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+ mul t0, a2, a1 # t0= a2a1
+ addu v1, v1, t1 # v1+= hi(a2a0)
+ addu v1, v1, t0 # v1= a3a0 + a2a1;
+
+ GET_OPA(a0) # a0 <- AA
+ EAS2(a0, rFP, a0) # a0 <- &fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ b .L${opcode}_finish
+%break
+
+.L${opcode}_finish:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, a0) # vAA::vAA+1 <- v0(low) :: v1(high)
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_MUL_LONG_2ADDR.S b/vm/mterp/mips/OP_MUL_LONG_2ADDR.S
new file mode 100644
index 000000000..85de7be25
--- /dev/null
+++ b/vm/mterp/mips/OP_MUL_LONG_2ADDR.S
@@ -0,0 +1,28 @@
+%verify "executed"
+ /*
+ * See comments in OP_MUL_LONG.S
+ */
+ /* mul-long/2addr vA, vB */
+ GET_OPA4(t0) # t0 <- A+
+
+ EAS2(t0, rFP, t0) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # vAA.low / high
+
+ GET_OPB(t1) # t1 <- B
+ EAS2(t1, rFP, t1) # t1 <- &fp[B]
+ LOAD64(a2, a3, t1) # vBB.low / high
+
+ mul v1, a3, a0 # v1= a3a0
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+ mul t2, a2, a1 # t2= a2a1
+ addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
+ addu v1, v1, t2 # v1= v1 + a2a1;
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ # vAA <- v0 (low)
+ STORE64(v0, v1, t0) # vAA+1 <- v1 (high)
+ GOTO_OPCODE(t1) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_NEG_DOUBLE.S b/vm/mterp/mips/OP_NEG_DOUBLE.S
new file mode 100644
index 000000000..5707c6590
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopWide.S" {"instr":"addu a1, a1, 0x80000000"}
diff --git a/vm/mterp/mips/OP_NEG_FLOAT.S b/vm/mterp/mips/OP_NEG_FLOAT.S
new file mode 100644
index 000000000..7e25e55b3
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"instr":"addu a0, a0, 0x80000000"}
diff --git a/vm/mterp/mips/OP_NEG_INT.S b/vm/mterp/mips/OP_NEG_INT.S
new file mode 100644
index 000000000..da87a6a72
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"instr":"negu a0, a0"}
diff --git a/vm/mterp/mips/OP_NEG_LONG.S b/vm/mterp/mips/OP_NEG_LONG.S
new file mode 100644
index 000000000..a5629878a
--- /dev/null
+++ b/vm/mterp/mips/OP_NEG_LONG.S
@@ -0,0 +1,3 @@
+%verify "executed"
+%include "mips/unopWide.S" {"result0":"v0", "result1":"v1", "preinstr":"negu v0, a0", "instr":"negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0"}
+
diff --git a/vm/mterp/mips/OP_NEW_ARRAY.S b/vm/mterp/mips/OP_NEW_ARRAY.S
new file mode 100644
index 000000000..5d0179447
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_ARRAY.S
@@ -0,0 +1,61 @@
+%verify "executed"
+%verify "negative array length"
+%verify "allocation fails"
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ GET_OPB(a0) # a0 <- B
+ FETCH(a2, 1) # a2 <- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ GET_VREG(a1, a0) # a1 <- vB (array length)
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ LOAD_eas2(a0, a3, a2) # a0 <- resolved class
+ # check length
+ bltz a1, common_errNegativeArraySize # negative length, bail - len in a1
+ EXPORT_PC() # req'd for resolve, alloc
+ # already resolved?
+ beqz a0, .L${opcode}_resolve
+
+ /*
+ * Finish allocation.
+ *
+ * a0 holds class
+ * a1 holds array length
+ */
+.L${opcode}_finish:
+ li a2, ALLOC_DONT_TRACK # don't track in local refs table
+ JAL(dvmAllocArrayByClass) # v0 <- call(clazz, length, flags)
+ GET_OPA4(a2) # a2 <- A+
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(v0, a2) # vA <- v0
+ GOTO_OPCODE(t0) # jump to next instruction
+%break
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ *
+ * a1 holds array length
+ * a2 holds class ref CCCC
+ */
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ move rOBJ, a1 # rOBJ <- length (save)
+ move a1, a2 # a1 <- CCCC
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a1, rOBJ # a1 <- length (restore)
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ move a0, v0
+ b .L${opcode}_finish # continue with ${opcode}_finish
+
+
diff --git a/vm/mterp/mips/OP_NEW_ARRAY_JUMBO.S b/vm/mterp/mips/OP_NEW_ARRAY_JUMBO.S
new file mode 100644
index 000000000..67615054d
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_ARRAY_JUMBO.S
@@ -0,0 +1,69 @@
+%verify "executed"
+%verify "negative array length"
+%verify "allocation fails"
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+ FETCH(a2, 1) # a2<- aaaa (lo)
+ FETCH(a3, 2) # a3<- AAAA (hi)
+ FETCH(a0, 4) # a0<- vCCCC
+ sll a3,a3,16 #
+ or a2, a2, a3 # a2<- AAAAaaaa
+
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ GET_VREG(a1, a0) # a1 <- vCCCC (array length)
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ LOAD_eas2(a0, a3, a2) # a0 <- resolved class
+ # check length
+ bltz a1, common_errNegativeArraySize # negative length, bail - len in a1
+ EXPORT_PC() # req'd for resolve, alloc
+ # already resolved?
+ beqz a0, .L${opcode}_resolve # not resolved,
+ b .L${opcode}_finish
+%break
+
+ /*
+ * Finish allocation.
+ *
+ * a0 holds class
+ * a1 holds array length
+ */
+.L${opcode}_finish:
+ li a2, ALLOC_DONT_TRACK # don't track in local refs table
+ JAL(dvmAllocArrayByClass) # v0 <- call(clazz, length, flags)
+ FETCH(a2, 3) # a2 <- vBBBB
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(v0, a2) # vBBBB <- v0
+ GOTO_OPCODE(t0) # jump to next instruction
+#%break
+
+
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ *
+ * a1 holds array length
+ * a2 holds class ref AAAAAAAA
+ */
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ move rOBJ, a1 # rOBJ <- length (save)
+ move a1, a2 # a1 <- AAAAAAAA
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a1, rOBJ # a1 <- length (restore)
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ move a0, v0
+ b .L${opcode}_finish # continue with to ${opcode}_finish
+
+
diff --git a/vm/mterp/mips/OP_NEW_INSTANCE.S b/vm/mterp/mips/OP_NEW_INSTANCE.S
new file mode 100644
index 000000000..ca946add3
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_INSTANCE.S
@@ -0,0 +1,106 @@
+%verify "executed"
+%verify "class not resolved"
+%verify "class cannot be resolved"
+%verify "class not initialized"
+%verify "class fails to initialize"
+%verify "class already resolved/initialized"
+%verify "class is abstract or interface"
+%verify "allocation fails"
+ /*
+ * Create a new instance of a class.
+ */
+ # new-instance vAA, class /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX <- &resolved_class
+#endif
+ EXPORT_PC() # req'd for init, resolve, alloc
+ # already resolved?
+ beqz a0, .L${opcode}_resolve # no, resolve it now
+.L${opcode}_resolved: # a0=class
+ lbu a1, offClassObject_status(a0) # a1 <- ClassStatus enum
+ # has class been initialized?
+ li t0, CLASS_INITIALIZED
+ move rOBJ, a0 # save a0
+ bne a1, t0, .L${opcode}_needinit # no, init class now
+
+.L${opcode}_initialized: # a0=class
+ LOAD_base_offClassObject_accessFlags(a3, a0) # a3 <- clazz->accessFlags
+ li a1, ALLOC_DONT_TRACK # flags for alloc call
+ # a0=class
+ JAL(dvmAllocObject) # v0 <- new object
+ GET_OPA(a3) # a3 <- AA
+#if defined(WITH_JIT)
+ /*
+ * The JIT needs the class to be fully resolved before it can
+ * include this instruction in a trace.
+ */
+ lhu a1, offThread_subMode(rSELF)
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ and a1, kSubModeJitTraceBuild # under construction?
+ bnez a1, .L${opcode}_jitCheck
+#else
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+#endif
+ b .L${opcode}_continue
+
+%break
+
+.L${opcode}_continue:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(v0, a3) # vAA <- v0
+ GOTO_OPCODE(t0) # jump to next instruction
+
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we need to stop the trace building early.
+ * v0: new object
+ * a3: vAA
+ */
+.L${opcode}_jitCheck:
+ lw a1, 0(rBIX) # reload resolved class
+ # okay?
+ bnez a1, .L${opcode}_continue # yes, finish
+ move rOBJ, v0 # preserve new object
+ move rBIX, a3 # preserve vAA
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) # (self, pc)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(rOBJ, rBIX) # vAA <- new object
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+ /*
+ * Class initialization required.
+ *
+ * a0 holds class object
+ */
+.L${opcode}_needinit:
+ JAL(dvmInitClass) # initialize class
+ move a0, rOBJ # restore a0
+ # check boolean result
+ bnez v0, .L${opcode}_initialized # success, continue
+ b common_exceptionThrown # failed, deal with init exception
+
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a1 holds BBBB
+ */
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- resolved ClassObject ptr
+ move a0, v0
+ # got null?
+ bnez v0, .L${opcode}_resolved # no, continue
+ b common_exceptionThrown # yes, handle exception
diff --git a/vm/mterp/mips/OP_NEW_INSTANCE_JUMBO.S b/vm/mterp/mips/OP_NEW_INSTANCE_JUMBO.S
new file mode 100644
index 000000000..a00991eb9
--- /dev/null
+++ b/vm/mterp/mips/OP_NEW_INSTANCE_JUMBO.S
@@ -0,0 +1,108 @@
+%verify "executed"
+%verify "class not resolved"
+%verify "class cannot be resolved"
+%verify "class not initialized"
+%verify "class fails to initialize"
+%verify "class already resolved/initialized"
+%verify "class is abstract or interface"
+%verify "allocation fails"
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+ FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX <- &resolved_class
+#endif
+ EXPORT_PC() # req'd for init, resolve, alloc
+ # already resolved?
+ beqz a0, .L${opcode}_resolve # no, resolve it now
+.L${opcode}_resolved: # a0=class
+ lbu a1, offClassObject_status(a0) # a1 <- ClassStatus enum
+ # has class been initialized?
+ li t0, CLASS_INITIALIZED
+ move rOBJ, a0 # save a0
+ bne a1, t0, .L${opcode}_needinit # no, init class now
+
+.L${opcode}_initialized: # a0=class
+ LOAD_base_offClassObject_accessFlags(a3, a0) # a3 <- clazz->accessFlags
+ li a1, ALLOC_DONT_TRACK # flags for alloc call
+ # a0=class
+ JAL(dvmAllocObject) # v0 <- new object
+ FETCH(a3, 3) # a3<- BBBB
+#if defined(WITH_JIT)
+ /*
+ * The JIT needs the class to be fully resolved before it can
+ * include this instruction in a trace.
+ */
+ lhu a1, offThread_subMode(rSELF)
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ and a1, kSubModeJitTraceBuild # under construction?
+ bnez a1, .L${opcode}_jitCheck
+#else
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+#endif
+ b .L${opcode}_continue
+
+%break
+
+.L${opcode}_continue:
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(v0, a3) # vBBBB <- v0
+ GOTO_OPCODE(t0) # jump to next instruction
+
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we need to stop the trace building early.
+ * v0: new object
+ * a3: vAA
+ */
+.L${opcode}_jitCheck:
+ lw a1, 0(rBIX) # reload resolved class
+ # okay?
+ bnez a1, .L${opcode}_continue # yes, finish
+ move rOBJ, v0 # preserve new object
+ move rBIX, a3 # preserve vAA
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) # (self, pc)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(rOBJ, rBIX) # vAA <- new object
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+ /*
+ * Class initialization required.
+ *
+ * a0 holds class object
+ */
+.L${opcode}_needinit:
+ JAL(dvmInitClass) # initialize class
+ move a0, rOBJ # restore a0
+ # check boolean result
+ bnez v0, .L${opcode}_initialized # success, continue
+ b common_exceptionThrown # failed, deal with init exception
+
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a1 holds AAAAAAAA
+ */
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- resolved ClassObject ptr
+ move a0, v0
+ # got null?
+ bnez v0, .L${opcode}_resolved # no, continue
+ b common_exceptionThrown # yes, handle exception
diff --git a/vm/mterp/mips/OP_NOP.S b/vm/mterp/mips/OP_NOP.S
new file mode 100644
index 000000000..38a5eb418
--- /dev/null
+++ b/vm/mterp/mips/OP_NOP.S
@@ -0,0 +1,13 @@
+%verify "executed"
+ FETCH_ADVANCE_INST(1) # advance to next instr, load rINST
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0) # execute it
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ .type dalvik_inst, @function
+dalvik_inst:
+ .ent dalvik_inst
+ .end dalvik_inst
+#endif
+
diff --git a/vm/mterp/mips/OP_NOT_INT.S b/vm/mterp/mips/OP_NOT_INT.S
new file mode 100644
index 000000000..3402d198a
--- /dev/null
+++ b/vm/mterp/mips/OP_NOT_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unop.S" {"instr":"not a0, a0"}
diff --git a/vm/mterp/mips/OP_NOT_LONG.S b/vm/mterp/mips/OP_NOT_LONG.S
new file mode 100644
index 000000000..8947c4eeb
--- /dev/null
+++ b/vm/mterp/mips/OP_NOT_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/unopWide.S" {"preinstr":"not a0, a0", "instr":"not a1, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT.S b/vm/mterp/mips/OP_OR_INT.S
new file mode 100644
index 000000000..683242f6d
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT_2ADDR.S b/vm/mterp/mips/OP_OR_INT_2ADDR.S
new file mode 100644
index 000000000..e63835bc3
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT_LIT16.S b/vm/mterp/mips/OP_OR_INT_LIT16.S
new file mode 100644
index 000000000..c12495d91
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_INT_LIT8.S b/vm/mterp/mips/OP_OR_INT_LIT8.S
new file mode 100644
index 000000000..f2ac2d065
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"or a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_OR_LONG.S b/vm/mterp/mips/OP_OR_LONG.S
new file mode 100644
index 000000000..8b080f66d
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_OR_LONG_2ADDR.S b/vm/mterp/mips/OP_OR_LONG_2ADDR.S
new file mode 100644
index 000000000..ef37dbf1d
--- /dev/null
+++ b/vm/mterp/mips/OP_OR_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide2addr.S" {"preinstr":"or a0, a0, a2", "instr":"or a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_PACKED_SWITCH.S b/vm/mterp/mips/OP_PACKED_SWITCH.S
new file mode 100644
index 000000000..add1dac7e
--- /dev/null
+++ b/vm/mterp/mips/OP_PACKED_SWITCH.S
@@ -0,0 +1,34 @@
+%default { "func":"dvmInterpHandlePackedSwitch" }
+%verify executed
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * When the JIT is present, all targets are treated as
+ * potential trace heads regardless of branch direction.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL($func) # a0 <- code-unit branch offset
+ addu a1, v0, v0 # a1 <- byte offset
+ bgtz a1, 1f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bnez a0, common_updateProfile
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_REM_DOUBLE.S b/vm/mterp/mips/OP_REM_DOUBLE.S
new file mode 100644
index 000000000..4329ed399
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(fmod)", "instr_f":"JAL(fmod)"}
diff --git a/vm/mterp/mips/OP_REM_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_REM_DOUBLE_2ADDR.S
new file mode 100644
index 000000000..97cd8930f
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(fmod)", "instr_f":"JAL(fmod)"}
diff --git a/vm/mterp/mips/OP_REM_FLOAT.S b/vm/mterp/mips/OP_REM_FLOAT.S
new file mode 100644
index 000000000..e68cfb585
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(fmodf)", "instr_f":"JAL(fmodf)"}
diff --git a/vm/mterp/mips/OP_REM_FLOAT_2ADDR.S b/vm/mterp/mips/OP_REM_FLOAT_2ADDR.S
new file mode 100644
index 000000000..f78cbb39b
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(fmodf)", "instr_f":"JAL(fmodf)"}
diff --git a/vm/mterp/mips/OP_REM_INT.S b/vm/mterp/mips/OP_REM_INT.S
new file mode 100644
index 000000000..f1dcf37db
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_INT_2ADDR.S b/vm/mterp/mips/OP_REM_INT_2ADDR.S
new file mode 100644
index 000000000..85d616bac
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_INT_LIT16.S b/vm/mterp/mips/OP_REM_INT_LIT16.S
new file mode 100644
index 000000000..1f3144210
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_INT_LIT8.S b/vm/mterp/mips/OP_REM_INT_LIT8.S
new file mode 100644
index 000000000..4b5bb8244
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"div zero, a0, a1; mfhi a0", "chkzero":"1"}
diff --git a/vm/mterp/mips/OP_REM_LONG.S b/vm/mterp/mips/OP_REM_LONG.S
new file mode 100644
index 000000000..d76221a08
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_LONG.S
@@ -0,0 +1,7 @@
+%verify "executed"
+/* ldivmod returns quotient in a0/a1 and remainder in a2/a3 */
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide.S" { "arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#endif
diff --git a/vm/mterp/mips/OP_REM_LONG_2ADDR.S b/vm/mterp/mips/OP_REM_LONG_2ADDR.S
new file mode 100644
index 000000000..be194a54d
--- /dev/null
+++ b/vm/mterp/mips/OP_REM_LONG_2ADDR.S
@@ -0,0 +1,6 @@
+%verify "executed"
+#ifdef HAVE_LITTLE_ENDIAN
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#else
+%include "mips/binopWide2addr.S" {"arg0":"a1", "arg1":"a0", "arg2":"a3", "arg3":"a2", "result0":"v1", "result1":"v0", "instr":"JAL(__moddi3)", "chkzero":"1"}
+#endif
diff --git a/vm/mterp/mips/OP_RETURN.S b/vm/mterp/mips/OP_RETURN.S
new file mode 100644
index 000000000..acc01cf6b
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN.S
@@ -0,0 +1,13 @@
+%verify "executed"
+ /*
+ * Return a 32-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA
+ sw a0, offThread_retval(rSELF) # retval.i <- vAA
+ b common_returnFromMethod
+
diff --git a/vm/mterp/mips/OP_RETURN_OBJECT.S b/vm/mterp/mips/OP_RETURN_OBJECT.S
new file mode 100644
index 000000000..445966870
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_RETURN.S"
diff --git a/vm/mterp/mips/OP_RETURN_VOID.S b/vm/mterp/mips/OP_RETURN_VOID.S
new file mode 100644
index 000000000..781f835f7
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_VOID.S
@@ -0,0 +1,3 @@
+%verify "executed"
+ b common_returnFromMethod
+
diff --git a/vm/mterp/mips/OP_RETURN_VOID_BARRIER.S b/vm/mterp/mips/OP_RETURN_VOID_BARRIER.S
new file mode 100644
index 000000000..4cb5b9bda
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_VOID_BARRIER.S
@@ -0,0 +1,3 @@
+%verify "executed"
+ SMP_DMB
+ b common_returnFromMethod
diff --git a/vm/mterp/mips/OP_RETURN_WIDE.S b/vm/mterp/mips/OP_RETURN_WIDE.S
new file mode 100644
index 000000000..bd93d6a03
--- /dev/null
+++ b/vm/mterp/mips/OP_RETURN_WIDE.S
@@ -0,0 +1,13 @@
+%verify "executed"
+ /*
+ * Return a 64-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ */
+ /* return-wide vAA */
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ addu a3, rSELF, offThread_retval # a3 <- &self->retval
+ LOAD64(a0, a1, a2) # a0/a1 <- vAA/vAA+1
+ STORE64(a0, a1, a3) # retval <- a0/a1
+ b common_returnFromMethod
+
diff --git a/vm/mterp/mips/OP_RSUB_INT.S b/vm/mterp/mips/OP_RSUB_INT.S
new file mode 100644
index 000000000..03918ea2d
--- /dev/null
+++ b/vm/mterp/mips/OP_RSUB_INT.S
@@ -0,0 +1,3 @@
+%verify "executed"
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+%include "mips/binopLit16.S" {"instr":"subu a0, a1, a0"}
diff --git a/vm/mterp/mips/OP_RSUB_INT_LIT8.S b/vm/mterp/mips/OP_RSUB_INT_LIT8.S
new file mode 100644
index 000000000..75d3d407a
--- /dev/null
+++ b/vm/mterp/mips/OP_RSUB_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"subu a0, a1, a0"}
diff --git a/vm/mterp/mips/OP_SGET.S b/vm/mterp/mips/OP_SGET.S
new file mode 100644
index 000000000..80e191388
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET.S
@@ -0,0 +1,50 @@
+%default { "barrier":" # no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .L${opcode}_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .L${opcode}_finish # resume
+%break
+
+.L${opcode}_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ $barrier # acquiring load
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[AA] <- a1
diff --git a/vm/mterp/mips/OP_SGET_BOOLEAN.S b/vm/mterp/mips/OP_SGET_BOOLEAN.S
new file mode 100644
index 000000000..86024ec5d
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_SGET_BOOLEAN_JUMBO.S
new file mode 100644
index 000000000..2a787a200
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_BYTE.S b/vm/mterp/mips/OP_SGET_BYTE.S
new file mode 100644
index 000000000..86024ec5d
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_BYTE_JUMBO.S b/vm/mterp/mips/OP_SGET_BYTE_JUMBO.S
new file mode 100644
index 000000000..2a787a200
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_CHAR.S b/vm/mterp/mips/OP_SGET_CHAR.S
new file mode 100644
index 000000000..86024ec5d
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_CHAR_JUMBO.S b/vm/mterp/mips/OP_SGET_CHAR_JUMBO.S
new file mode 100644
index 000000000..2a787a200
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_JUMBO.S b/vm/mterp/mips/OP_SGET_JUMBO.S
new file mode 100644
index 000000000..93e75863a
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_JUMBO.S
@@ -0,0 +1,54 @@
+%default { "barrier":" # no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .L${opcode}_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .L${opcode}_finish # resume
+%break
+
+.L${opcode}_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ $barrier # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
diff --git a/vm/mterp/mips/OP_SGET_OBJECT.S b/vm/mterp/mips/OP_SGET_OBJECT.S
new file mode 100644
index 000000000..86024ec5d
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_OBJECT_JUMBO.S b/vm/mterp/mips/OP_SGET_OBJECT_JUMBO.S
new file mode 100644
index 000000000..2a787a200
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE.S
new file mode 100644
index 000000000..d880f9783
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..c9975c8fa
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_OBJECT_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_SHORT.S b/vm/mterp/mips/OP_SGET_SHORT.S
new file mode 100644
index 000000000..86024ec5d
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S"
diff --git a/vm/mterp/mips/OP_SGET_SHORT_JUMBO.S b/vm/mterp/mips/OP_SGET_SHORT_JUMBO.S
new file mode 100644
index 000000000..2a787a200
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SGET_VOLATILE.S b/vm/mterp/mips/OP_SGET_VOLATILE.S
new file mode 100644
index 000000000..d880f9783
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SGET_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..93a5f4195
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_JUMBO.S" {"barrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SGET_WIDE.S b/vm/mterp/mips/OP_SGET_WIDE.S
new file mode 100644
index 000000000..0e7299230
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE.S
@@ -0,0 +1,58 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * 64-bit SGET handler.
+ */
+ # sget-wide vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .L${opcode}_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in v0.
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+
+ b .L${opcode}_finish # resume
+%break
+
+.L${opcode}_finish:
+ GET_OPA(a1) # a1 <- AA
+ .if $volatile
+ vLOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .else
+ LOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a1, rFP, a1) # a1 <- &fp[AA]
+ STORE64(a2, a3, a1) # vAA/vAA+1 <- a2/a3
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
diff --git a/vm/mterp/mips/OP_SGET_WIDE_JUMBO.S b/vm/mterp/mips/OP_SGET_WIDE_JUMBO.S
new file mode 100644
index 000000000..7a5288926
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE_JUMBO.S
@@ -0,0 +1,47 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 64-bit SGET handler.
+ */
+ /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(a2, a2) # a2 <- dvmDex->pResFields
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .L${opcode}_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ *
+ * Returns StaticField pointer in v0.
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+ b .L${opcode}_finish # resume
+%break
+
+.L${opcode}_finish:
+ FETCH(a1, 3) # a1<- BBBB
+ .if $volatile
+ vLOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .else
+ LOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .endif
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ EAS2(a1, rFP, a1) # a1 <- &fp[BBBB]
+ STORE64(a2, a3, a1) # vBBBB/vBBBB+1 <- a2/a3
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_SGET_WIDE_VOLATILE.S b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE.S
new file mode 100644
index 000000000..ca2fce4f7
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SGET_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..c6039c39f
--- /dev/null
+++ b/vm/mterp/mips/OP_SGET_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SGET_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SHL_INT.S b/vm/mterp/mips/OP_SHL_INT.S
new file mode 100644
index 000000000..9981decf1
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"preinstr":"and a1, a1, 31", "instr":"sll a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHL_INT_2ADDR.S b/vm/mterp/mips/OP_SHL_INT_2ADDR.S
new file mode 100644
index 000000000..0ac0a8ff1
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"preinstr":"and a1, a1, 31", "instr":"sll a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHL_INT_LIT8.S b/vm/mterp/mips/OP_SHL_INT_LIT8.S
new file mode 100644
index 000000000..1110037ad
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"preinstr":"and a1, a1, 31", "instr":"sll a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHL_LONG.S b/vm/mterp/mips/OP_SHL_LONG.S
new file mode 100644
index 000000000..817ac2f29
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_LONG.S
@@ -0,0 +1,33 @@
+%verify "executed"
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t2) # t2 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ EAS2(t2, rFP, t2) # t2 <- &fp[AA]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ andi a2, 0x20 # shift <- shift & 0x20
+ movn v1, v0, a2 # rhi<- rlo (if shift&0x20)
+ movn v0, zero, a2 # rlo<- 0 (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, t2) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_SHL_LONG_2ADDR.S b/vm/mterp/mips/OP_SHL_LONG_2ADDR.S
new file mode 100644
index 000000000..119142794
--- /dev/null
+++ b/vm/mterp/mips/OP_SHL_LONG_2ADDR.S
@@ -0,0 +1,28 @@
+%verify "executed"
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ GET_OPA4(t2) # t2 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(rOBJ, rFP, t2) # rOBJ <- &fp[A]
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ andi a2, 0x20 # shift <- shift & 0x20
+ movn v1, v0, a2 # rhi<- rlo (if shift&0x20)
+ movn v0, zero, a2 # rlo<- 0 (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_SHR_INT.S b/vm/mterp/mips/OP_SHR_INT.S
new file mode 100644
index 000000000..c5911e793
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"preinstr":"and a1, a1, 31", "instr":"sra a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHR_INT_2ADDR.S b/vm/mterp/mips/OP_SHR_INT_2ADDR.S
new file mode 100644
index 000000000..b979e9f0f
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"preinstr":"and a1, a1, 31", "instr":"sra a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHR_INT_LIT8.S b/vm/mterp/mips/OP_SHR_INT_LIT8.S
new file mode 100644
index 000000000..612461986
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"preinstr":"and a1, a1, 31", "instr":"sra a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SHR_LONG.S b/vm/mterp/mips/OP_SHR_LONG.S
new file mode 100644
index 000000000..690697898
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_LONG.S
@@ -0,0 +1,33 @@
+%verify "executed"
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t3) # t3 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+ EAS2(t3, rFP, t3) # t3 <- &fp[AA]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ sra a3, a1, 31 # a3<- sign(ah)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, a3, a2 # rhi<- sign(ahi) (if shift&0x20)
+
+ STORE64(v0, v1, t3) # vAA/vAA+1 <- v0/v1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_SHR_LONG_2ADDR.S b/vm/mterp/mips/OP_SHR_LONG_2ADDR.S
new file mode 100644
index 000000000..439923e66
--- /dev/null
+++ b/vm/mterp/mips/OP_SHR_LONG_2ADDR.S
@@ -0,0 +1,28 @@
+%verify "executed"
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ GET_OPA4(t2) # t2 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t2, rFP, t2) # t2 <- &fp[A]
+ LOAD64(a0, a1, t2) # a0/a1 <- vAA/vAA+1
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ sra a3, a1, 31 # a3<- sign(ah)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, a3, a2 # rhi<- sign(ahi) (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, t2) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_SPARSE_SWITCH.S b/vm/mterp/mips/OP_SPARSE_SWITCH.S
new file mode 100644
index 000000000..32067dead
--- /dev/null
+++ b/vm/mterp/mips/OP_SPARSE_SWITCH.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_PACKED_SWITCH.S" { "func":"dvmInterpHandleSparseSwitch" }
diff --git a/vm/mterp/mips/OP_SPUT.S b/vm/mterp/mips/OP_SPUT.S
new file mode 100644
index 000000000..722a12f44
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT.S
@@ -0,0 +1,50 @@
+%default { "postbarrier":"# no-op", "prebarrier":"# no-op" }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .L${opcode}_finish # is resolved entry !null?
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .L${opcode}_finish # resume
+%break
+
+.L${opcode}_finish:
+ # field ptr in a0
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ $prebarrier # releasing store
+ sw a1, offStaticField_value(a0) # field <- vAA
+ $postbarrier
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_SPUT_BOOLEAN.S b/vm/mterp/mips/OP_SPUT_BOOLEAN.S
new file mode 100644
index 000000000..96434b7fa
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BOOLEAN.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_BOOLEAN_JUMBO.S b/vm/mterp/mips/OP_SPUT_BOOLEAN_JUMBO.S
new file mode 100644
index 000000000..e183701de
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BOOLEAN_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_BYTE.S b/vm/mterp/mips/OP_SPUT_BYTE.S
new file mode 100644
index 000000000..96434b7fa
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BYTE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_BYTE_JUMBO.S b/vm/mterp/mips/OP_SPUT_BYTE_JUMBO.S
new file mode 100644
index 000000000..e183701de
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_BYTE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_CHAR.S b/vm/mterp/mips/OP_SPUT_CHAR.S
new file mode 100644
index 000000000..96434b7fa
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_CHAR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_CHAR_JUMBO.S b/vm/mterp/mips/OP_SPUT_CHAR_JUMBO.S
new file mode 100644
index 000000000..e183701de
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_CHAR_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_JUMBO.S b/vm/mterp/mips/OP_SPUT_JUMBO.S
new file mode 100644
index 000000000..5a4f8246e
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_JUMBO.S
@@ -0,0 +1,55 @@
+%default { "postbarrier":" # no-op ", "prebarrier":" # no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 32-bit SPUT handler: stores vBBBB into a resolved static field.
+ *
+ * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+ * sput-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo half of field ref)
+ FETCH(a1, 2) # a1<- AAAA (hi half of field ref)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000
+ or a1, a0, a1 # a1<- AAAAaaaa (full 32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr (NULL if unresolved)
+ bnez a0, .L${opcode}_finish # already resolved? go store
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- field ptr, as expected by _finish
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .L${opcode}_finish # resume
+%break
+
+.L${opcode}_finish:
+ # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB (source register index)
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ $prebarrier # releasing store (barrier for volatile variants)
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ $postbarrier
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT.S b/vm/mterp/mips/OP_SPUT_OBJECT.S
new file mode 100644
index 000000000..0fd3db3de
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT.S
@@ -0,0 +1,56 @@
+%default { "postbarrier":"# no-op", "prebarrier":"# no-op" }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * General 32-bit SPUT handler for object references (with GC card mark).
+ *
+ * for: sput-object, sput-object-volatile
+ */
+ /* op vAA, field@BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr (NULL if unresolved)
+ bnez a0, .L${opcode}_finish # already resolved? go store
+
+ /* Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- field ptr, as expected by _finish
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .L${opcode}_finish # resume
+
+%break
+.L${opcode}_finish: # field ptr in a0
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[AA] (object ref being stored)
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ lw t1, offField_clazz(a0) # t1 <- field->clazz (object owning the static)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ $prebarrier # releasing store
+ sw a1, offStaticField_value(a0) # field <- vAA
+ $postbarrier
+ beqz a1, 1f # stored null? skip card mark
+ srl t2, t1, GC_CARD_SHIFT # t2 <- card index for field->clazz
+ addu t3, a2, t2 # t3 <- address of card byte
+ sb a2, (t3) # dirty the card (low byte of table base)
+1:
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT_JUMBO.S b/vm/mterp/mips/OP_SPUT_OBJECT_JUMBO.S
new file mode 100644
index 000000000..22fa450cb
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT_JUMBO.S
@@ -0,0 +1,58 @@
+%default { "postbarrier":" # no-op ", "prebarrier":" # no-op " }
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 32-bit SPUT handler for objects (with GC card mark).
+ */
+ /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo half of field ref)
+ FETCH(a1, 2) # a1<- AAAA (hi half of field ref)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000
+ or a1,a0,a1 # a1<- AAAAaaaa (full 32-bit field ref)
+
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr (NULL if unresolved)
+ bnez a0, .L${opcode}_finish # already resolved? go store
+
+ /* Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- field ptr, as expected by _finish
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .L${opcode}_finish # resume
+
+%break
+.L${opcode}_finish: # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB (source register index)
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB] (object ref being stored)
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ lw t1, offField_clazz(a0) # t1 <- field->clazz (object owning the static)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ $prebarrier # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ $postbarrier
+ beqz a1, 1f # stored null? skip card mark
+ srl t2, t1, GC_CARD_SHIFT # t2 <- card index for field->clazz
+ addu t3, a2, t2 # t3 <- address of card byte
+ sb a2, (t3) # dirty the card (low byte of table base)
+ 1:
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE.S b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE.S
new file mode 100644
index 000000000..8b6dc1432
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_OBJECT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..fd22e6ea3
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_OBJECT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_SHORT.S b/vm/mterp/mips/OP_SPUT_SHORT.S
new file mode 100644
index 000000000..96434b7fa
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_SHORT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S"
diff --git a/vm/mterp/mips/OP_SPUT_SHORT_JUMBO.S b/vm/mterp/mips/OP_SPUT_SHORT_JUMBO.S
new file mode 100644
index 000000000..e183701de
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_SHORT_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S"
diff --git a/vm/mterp/mips/OP_SPUT_VOLATILE.S b/vm/mterp/mips/OP_SPUT_VOLATILE.S
new file mode 100644
index 000000000..9e1f1a5c5
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SPUT_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..7c8e2f437
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_JUMBO.S" {"prebarrier":"SMP_DMB_ST", "postbarrier":"SMP_DMB"}
diff --git a/vm/mterp/mips/OP_SPUT_WIDE.S b/vm/mterp/mips/OP_SPUT_WIDE.S
new file mode 100644
index 000000000..3e1d042e9
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE.S
@@ -0,0 +1,58 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * 64-bit SPUT handler: stores the vAA/vAA+1 pair into a static field.
+ */
+ # sput-wide vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ GET_OPA(t0) # t0 <- AA
+ LOAD_eas2(a2, rBIX, a1) # a2 <- resolved StaticField ptr (NULL if unresolved)
+ EAS2(rOBJ, rFP, t0) # rOBJ<- &fp[AA]
+ # is resolved entry null?
+ beqz a2, .L${opcode}_resolve # yes, do resolve
+.L${opcode}_finish: # field ptr in a2, &fp[AA] in rOBJ
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ .if $volatile
+ addu a2, offStaticField_value # a2<- pointer to data
+ JAL(dvmQuasiAtomicSwap64Sync) # atomic 64-bit store of a0/a1 to (a2)
+ .else
+ STORE64_off(a0, a1, a2, offStaticField_value) # field <- vAA/vAA+1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+%break
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rOBJ: &fp[AA]
+ * rBIX: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in a2.
+ */
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ # success ?
+ move a0, v0 # a0<- field ptr
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ move a2, v0 # a2<- field ptr, as _finish expects
+ b .L${opcode}_finish # resume
diff --git a/vm/mterp/mips/OP_SPUT_WIDE_JUMBO.S b/vm/mterp/mips/OP_SPUT_WIDE_JUMBO.S
new file mode 100644
index 000000000..b12ac62b0
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE_JUMBO.S
@@ -0,0 +1,61 @@
+%default {"volatile":"0"}
+%verify "executed"
+%verify "field already resolved"
+%verify "field not yet resolved"
+%verify "field cannot be resolved"
+ /*
+ * Jumbo 64-bit SPUT handler: stores the vBBBB/vBBBB+1 pair into a static field.
+ */
+ /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo half of field ref)
+ FETCH(a1, 2) # a1<- AAAA (hi half of field ref)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields (before a2 is reused)
+ sll a1,a1,16 # a1<- AAAA0000
+ or a1, a0, a1 # a1<- AAAAaaaa (full 32-bit field ref)
+ FETCH(t0, 3) # t0<- BBBB (target register pair index)
+ LOAD_eas2(a2, rBIX, a1) # a2 <- resolved StaticField ptr (NULL if unresolved)
+ EAS2(rOBJ, rFP, t0) # rOBJ<- &fp[BBBB]
+ # is resolved entry null?
+ beqz a2, .L${opcode}_resolve # yes, do resolve
+.L${opcode}_finish: # field ptr in a2, &fp[BBBB] in rOBJ
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vBBBB/vBBBB+1
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ .if $volatile
+ addu a2, offStaticField_value # a2<- pointer to data
+ JAL(dvmQuasiAtomicSwap64Sync) # atomic 64-bit store of a0/a1 to (a2)
+ .else
+ STORE64_off(a0, a1, a2, offStaticField_value) # field <- vBBBB/vBBBB+1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+%break
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rOBJ: &fp[BBBB]
+ * rBIX: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in a2.
+ */
+.L${opcode}_resolve:
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ # success ?
+ move a0, v0 # a0<- field ptr
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ move a2, v0 # a2<- field ptr, as _finish expects
+ b .L${opcode}_finish # resume
diff --git a/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE.S b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE.S
new file mode 100644
index 000000000..359b37fa4
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_WIDE.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S
new file mode 100644
index 000000000..6dc59e5b3
--- /dev/null
+++ b/vm/mterp/mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/OP_SPUT_WIDE_JUMBO.S" {"volatile":"1"}
diff --git a/vm/mterp/mips/OP_SUB_DOUBLE.S b/vm/mterp/mips/OP_SUB_DOUBLE.S
new file mode 100644
index 000000000..3b6fa6d8c
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_DOUBLE.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide.S" {"instr":"JAL(__subdf3)", "instr_f":"sub.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_DOUBLE_2ADDR.S b/vm/mterp/mips/OP_SUB_DOUBLE_2ADDR.S
new file mode 100644
index 000000000..cdd973e49
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_DOUBLE_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflopWide2addr.S" {"instr":"JAL(__subdf3)", "instr_f":"sub.d fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_FLOAT.S b/vm/mterp/mips/OP_SUB_FLOAT.S
new file mode 100644
index 000000000..909626706
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_FLOAT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop.S" {"instr":"JAL(__subsf3)", "instr_f":"sub.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_FLOAT_2ADDR.S b/vm/mterp/mips/OP_SUB_FLOAT_2ADDR.S
new file mode 100644
index 000000000..143b7e6a3
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_FLOAT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binflop2addr.S" {"instr":"JAL(__subsf3)", "instr_f":"sub.s fv0, fa0, fa1"}
diff --git a/vm/mterp/mips/OP_SUB_INT.S b/vm/mterp/mips/OP_SUB_INT.S
new file mode 100644
index 000000000..aaa6a7b2e
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"subu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SUB_INT_2ADDR.S b/vm/mterp/mips/OP_SUB_INT_2ADDR.S
new file mode 100644
index 000000000..0032229dc
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"subu a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_SUB_LONG.S b/vm/mterp/mips/OP_SUB_LONG.S
new file mode 100644
index 000000000..700d4ea3d
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_LONG.S
@@ -0,0 +1,10 @@
+%verify "executed"
+/*
+ * For little endian the code sequence looks as follows:
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * sltu a0,a0,v0
+ * subu v1,v1,a0
+ */
+%include "mips/binopWide.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
+
diff --git a/vm/mterp/mips/OP_SUB_LONG_2ADDR.S b/vm/mterp/mips/OP_SUB_LONG_2ADDR.S
new file mode 100644
index 000000000..9b12d6988
--- /dev/null
+++ b/vm/mterp/mips/OP_SUB_LONG_2ADDR.S
@@ -0,0 +1,5 @@
+%verify "executed"
+/*
+ * See comments in OP_SUB_LONG.S
+ */
+%include "mips/binopWide2addr.S" { "result0":"v0", "result1":"v1", "preinstr":"subu v0, a0, a2", "instr":"subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0" }
diff --git a/vm/mterp/mips/OP_THROW.S b/vm/mterp/mips/OP_THROW.S
new file mode 100644
index 000000000..b879b29d4
--- /dev/null
+++ b/vm/mterp/mips/OP_THROW.S
@@ -0,0 +1,15 @@
+%verify "executed"
+%verify "exception for null object"
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a1, a2) # a1 <- vAA (exception object)
+ EXPORT_PC() # exception handler can throw
+ # null object?
+ beqz a1, common_errNullObject # yes, throw an NPE instead
+ # bypass dvmSetException, just store it
+ STORE_offThread_exception(a1, rSELF) # thread->exception <- obj
+ b common_exceptionThrown # dispatch to exception handling
+
diff --git a/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR.S b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR.S
new file mode 100644
index 000000000..a68b256c0
--- /dev/null
+++ b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR.S
@@ -0,0 +1,15 @@
+%verify "executed"
+ /*
+ * Handle a throw-verification-error instruction. This throws an
+ * exception for an error discovered during verification. The
+ * exception is indicated by AA, with some detail provided by BBBB.
+ */
+ /* op AA, ref@BBBB */
+
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ FETCH(a2, 1) # a2 <- BBBB (error detail ref)
+ EXPORT_PC() # export the PC
+ GET_OPA(a1) # a1 <- AA (error kind)
+ JAL(dvmThrowVerificationError) # always throws
+ b common_exceptionThrown # handle exception
+
diff --git a/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S
new file mode 100644
index 000000000..dbddc4289
--- /dev/null
+++ b/vm/mterp/mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S
@@ -0,0 +1,17 @@
+%verify "executed"
+ /*
+ * Handle a jumbo throw-verification-error instruction. This throws an
+ * exception for an error discovered during verification. The
+ * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+ */
+ /* exop BBBB, Class@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo half of detail ref)
+ FETCH(a2, 2) # a2<- AAAA (hi half of detail ref)
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ sll a2,a2,16 # a2<- AAAA0000
+ or a2, a1, a2 # a2<- AAAAaaaa (error detail ref)
+ EXPORT_PC() # export the PC
+ FETCH(a1, 3) # a1<- BBBB (error kind)
+ JAL(dvmThrowVerificationError) # always throws
+ b common_exceptionThrown # handle exception
+
diff --git a/vm/mterp/mips/OP_UNUSED_27FF.S b/vm/mterp/mips/OP_UNUSED_27FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_27FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_28FF.S b/vm/mterp/mips/OP_UNUSED_28FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_28FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_29FF.S b/vm/mterp/mips/OP_UNUSED_29FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_29FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2AFF.S b/vm/mterp/mips/OP_UNUSED_2AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2BFF.S b/vm/mterp/mips/OP_UNUSED_2BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2CFF.S b/vm/mterp/mips/OP_UNUSED_2CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2DFF.S b/vm/mterp/mips/OP_UNUSED_2DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2EFF.S b/vm/mterp/mips/OP_UNUSED_2EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_2FFF.S b/vm/mterp/mips/OP_UNUSED_2FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_2FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_30FF.S b/vm/mterp/mips/OP_UNUSED_30FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_30FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_31FF.S b/vm/mterp/mips/OP_UNUSED_31FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_31FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_32FF.S b/vm/mterp/mips/OP_UNUSED_32FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_32FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_33FF.S b/vm/mterp/mips/OP_UNUSED_33FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_33FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_34FF.S b/vm/mterp/mips/OP_UNUSED_34FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_34FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_35FF.S b/vm/mterp/mips/OP_UNUSED_35FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_35FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_36FF.S b/vm/mterp/mips/OP_UNUSED_36FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_36FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_37FF.S b/vm/mterp/mips/OP_UNUSED_37FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_37FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_38FF.S b/vm/mterp/mips/OP_UNUSED_38FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_38FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_39FF.S b/vm/mterp/mips/OP_UNUSED_39FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_39FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3AFF.S b/vm/mterp/mips/OP_UNUSED_3AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3BFF.S b/vm/mterp/mips/OP_UNUSED_3BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3CFF.S b/vm/mterp/mips/OP_UNUSED_3CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3DFF.S b/vm/mterp/mips/OP_UNUSED_3DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3E.S b/vm/mterp/mips/OP_UNUSED_3E.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3E.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3EFF.S b/vm/mterp/mips/OP_UNUSED_3EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3F.S b/vm/mterp/mips/OP_UNUSED_3F.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3F.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_3FFF.S b/vm/mterp/mips/OP_UNUSED_3FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_3FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_40.S b/vm/mterp/mips/OP_UNUSED_40.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_40.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_40FF.S b/vm/mterp/mips/OP_UNUSED_40FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_40FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_41.S b/vm/mterp/mips/OP_UNUSED_41.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_41.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_41FF.S b/vm/mterp/mips/OP_UNUSED_41FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_41FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_42.S b/vm/mterp/mips/OP_UNUSED_42.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_42.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_42FF.S b/vm/mterp/mips/OP_UNUSED_42FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_42FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_43.S b/vm/mterp/mips/OP_UNUSED_43.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_43.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_43FF.S b/vm/mterp/mips/OP_UNUSED_43FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_43FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_44FF.S b/vm/mterp/mips/OP_UNUSED_44FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_44FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_45FF.S b/vm/mterp/mips/OP_UNUSED_45FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_45FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_46FF.S b/vm/mterp/mips/OP_UNUSED_46FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_46FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_47FF.S b/vm/mterp/mips/OP_UNUSED_47FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_47FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_48FF.S b/vm/mterp/mips/OP_UNUSED_48FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_48FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_49FF.S b/vm/mterp/mips/OP_UNUSED_49FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_49FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4AFF.S b/vm/mterp/mips/OP_UNUSED_4AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4BFF.S b/vm/mterp/mips/OP_UNUSED_4BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4CFF.S b/vm/mterp/mips/OP_UNUSED_4CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4DFF.S b/vm/mterp/mips/OP_UNUSED_4DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4EFF.S b/vm/mterp/mips/OP_UNUSED_4EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_4FFF.S b/vm/mterp/mips/OP_UNUSED_4FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_4FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_50FF.S b/vm/mterp/mips/OP_UNUSED_50FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_50FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_51FF.S b/vm/mterp/mips/OP_UNUSED_51FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_51FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_52FF.S b/vm/mterp/mips/OP_UNUSED_52FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_52FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_53FF.S b/vm/mterp/mips/OP_UNUSED_53FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_53FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_54FF.S b/vm/mterp/mips/OP_UNUSED_54FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_54FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_55FF.S b/vm/mterp/mips/OP_UNUSED_55FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_55FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_56FF.S b/vm/mterp/mips/OP_UNUSED_56FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_56FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_57FF.S b/vm/mterp/mips/OP_UNUSED_57FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_57FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_58FF.S b/vm/mterp/mips/OP_UNUSED_58FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_58FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_59FF.S b/vm/mterp/mips/OP_UNUSED_59FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_59FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5AFF.S b/vm/mterp/mips/OP_UNUSED_5AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5BFF.S b/vm/mterp/mips/OP_UNUSED_5BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5CFF.S b/vm/mterp/mips/OP_UNUSED_5CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5DFF.S b/vm/mterp/mips/OP_UNUSED_5DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5EFF.S b/vm/mterp/mips/OP_UNUSED_5EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_5FFF.S b/vm/mterp/mips/OP_UNUSED_5FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_5FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_60FF.S b/vm/mterp/mips/OP_UNUSED_60FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_60FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_61FF.S b/vm/mterp/mips/OP_UNUSED_61FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_61FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_62FF.S b/vm/mterp/mips/OP_UNUSED_62FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_62FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_63FF.S b/vm/mterp/mips/OP_UNUSED_63FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_63FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_64FF.S b/vm/mterp/mips/OP_UNUSED_64FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_64FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_65FF.S b/vm/mterp/mips/OP_UNUSED_65FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_65FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_66FF.S b/vm/mterp/mips/OP_UNUSED_66FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_66FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_67FF.S b/vm/mterp/mips/OP_UNUSED_67FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_67FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_68FF.S b/vm/mterp/mips/OP_UNUSED_68FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_68FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_69FF.S b/vm/mterp/mips/OP_UNUSED_69FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_69FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6AFF.S b/vm/mterp/mips/OP_UNUSED_6AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6BFF.S b/vm/mterp/mips/OP_UNUSED_6BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6CFF.S b/vm/mterp/mips/OP_UNUSED_6CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6DFF.S b/vm/mterp/mips/OP_UNUSED_6DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6EFF.S b/vm/mterp/mips/OP_UNUSED_6EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_6FFF.S b/vm/mterp/mips/OP_UNUSED_6FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_6FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_70FF.S b/vm/mterp/mips/OP_UNUSED_70FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_70FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_71FF.S b/vm/mterp/mips/OP_UNUSED_71FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_71FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_72FF.S b/vm/mterp/mips/OP_UNUSED_72FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_72FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_73.S b/vm/mterp/mips/OP_UNUSED_73.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_73.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_73FF.S b/vm/mterp/mips/OP_UNUSED_73FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_73FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_74FF.S b/vm/mterp/mips/OP_UNUSED_74FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_74FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_75FF.S b/vm/mterp/mips/OP_UNUSED_75FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_75FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_76FF.S b/vm/mterp/mips/OP_UNUSED_76FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_76FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_77FF.S b/vm/mterp/mips/OP_UNUSED_77FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_77FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_78FF.S b/vm/mterp/mips/OP_UNUSED_78FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_78FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_79.S b/vm/mterp/mips/OP_UNUSED_79.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_79.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_79FF.S b/vm/mterp/mips/OP_UNUSED_79FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_79FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7A.S b/vm/mterp/mips/OP_UNUSED_7A.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7A.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7AFF.S b/vm/mterp/mips/OP_UNUSED_7AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7BFF.S b/vm/mterp/mips/OP_UNUSED_7BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7CFF.S b/vm/mterp/mips/OP_UNUSED_7CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7DFF.S b/vm/mterp/mips/OP_UNUSED_7DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7EFF.S b/vm/mterp/mips/OP_UNUSED_7EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_7FFF.S b/vm/mterp/mips/OP_UNUSED_7FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_7FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_80FF.S b/vm/mterp/mips/OP_UNUSED_80FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_80FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_81FF.S b/vm/mterp/mips/OP_UNUSED_81FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_81FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_82FF.S b/vm/mterp/mips/OP_UNUSED_82FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_82FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_83FF.S b/vm/mterp/mips/OP_UNUSED_83FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_83FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_84FF.S b/vm/mterp/mips/OP_UNUSED_84FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_84FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_85FF.S b/vm/mterp/mips/OP_UNUSED_85FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_85FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_86FF.S b/vm/mterp/mips/OP_UNUSED_86FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_86FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_87FF.S b/vm/mterp/mips/OP_UNUSED_87FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_87FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_88FF.S b/vm/mterp/mips/OP_UNUSED_88FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_88FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_89FF.S b/vm/mterp/mips/OP_UNUSED_89FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_89FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8AFF.S b/vm/mterp/mips/OP_UNUSED_8AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8BFF.S b/vm/mterp/mips/OP_UNUSED_8BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8CFF.S b/vm/mterp/mips/OP_UNUSED_8CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8DFF.S b/vm/mterp/mips/OP_UNUSED_8DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8EFF.S b/vm/mterp/mips/OP_UNUSED_8EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_8FFF.S b/vm/mterp/mips/OP_UNUSED_8FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_8FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_90FF.S b/vm/mterp/mips/OP_UNUSED_90FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_90FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_91FF.S b/vm/mterp/mips/OP_UNUSED_91FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_91FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_92FF.S b/vm/mterp/mips/OP_UNUSED_92FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_92FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_93FF.S b/vm/mterp/mips/OP_UNUSED_93FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_93FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_94FF.S b/vm/mterp/mips/OP_UNUSED_94FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_94FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_95FF.S b/vm/mterp/mips/OP_UNUSED_95FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_95FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_96FF.S b/vm/mterp/mips/OP_UNUSED_96FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_96FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_97FF.S b/vm/mterp/mips/OP_UNUSED_97FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_97FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_98FF.S b/vm/mterp/mips/OP_UNUSED_98FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_98FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_99FF.S b/vm/mterp/mips/OP_UNUSED_99FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_99FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9AFF.S b/vm/mterp/mips/OP_UNUSED_9AFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9AFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9BFF.S b/vm/mterp/mips/OP_UNUSED_9BFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9BFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9CFF.S b/vm/mterp/mips/OP_UNUSED_9CFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9CFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9DFF.S b/vm/mterp/mips/OP_UNUSED_9DFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9DFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9EFF.S b/vm/mterp/mips/OP_UNUSED_9EFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9EFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_9FFF.S b/vm/mterp/mips/OP_UNUSED_9FFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_9FFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A0FF.S b/vm/mterp/mips/OP_UNUSED_A0FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A1FF.S b/vm/mterp/mips/OP_UNUSED_A1FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A2FF.S b/vm/mterp/mips/OP_UNUSED_A2FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A3FF.S b/vm/mterp/mips/OP_UNUSED_A3FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A4FF.S b/vm/mterp/mips/OP_UNUSED_A4FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A5FF.S b/vm/mterp/mips/OP_UNUSED_A5FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A6FF.S b/vm/mterp/mips/OP_UNUSED_A6FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A7FF.S b/vm/mterp/mips/OP_UNUSED_A7FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A8FF.S b/vm/mterp/mips/OP_UNUSED_A8FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_A9FF.S b/vm/mterp/mips/OP_UNUSED_A9FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_A9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_AAFF.S b/vm/mterp/mips/OP_UNUSED_AAFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_AAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ABFF.S b/vm/mterp/mips/OP_UNUSED_ABFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ABFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ACFF.S b/vm/mterp/mips/OP_UNUSED_ACFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ACFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ADFF.S b/vm/mterp/mips/OP_UNUSED_ADFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ADFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_AEFF.S b/vm/mterp/mips/OP_UNUSED_AEFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_AEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_AFFF.S b/vm/mterp/mips/OP_UNUSED_AFFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_AFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B0FF.S b/vm/mterp/mips/OP_UNUSED_B0FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B1FF.S b/vm/mterp/mips/OP_UNUSED_B1FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B2FF.S b/vm/mterp/mips/OP_UNUSED_B2FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B3FF.S b/vm/mterp/mips/OP_UNUSED_B3FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B4FF.S b/vm/mterp/mips/OP_UNUSED_B4FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B5FF.S b/vm/mterp/mips/OP_UNUSED_B5FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B6FF.S b/vm/mterp/mips/OP_UNUSED_B6FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B7FF.S b/vm/mterp/mips/OP_UNUSED_B7FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B8FF.S b/vm/mterp/mips/OP_UNUSED_B8FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_B9FF.S b/vm/mterp/mips/OP_UNUSED_B9FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_B9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BAFF.S b/vm/mterp/mips/OP_UNUSED_BAFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BBFF.S b/vm/mterp/mips/OP_UNUSED_BBFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BCFF.S b/vm/mterp/mips/OP_UNUSED_BCFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BCFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BDFF.S b/vm/mterp/mips/OP_UNUSED_BDFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BEFF.S b/vm/mterp/mips/OP_UNUSED_BEFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_BFFF.S b/vm/mterp/mips/OP_UNUSED_BFFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_BFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C0FF.S b/vm/mterp/mips/OP_UNUSED_C0FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C1FF.S b/vm/mterp/mips/OP_UNUSED_C1FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C2FF.S b/vm/mterp/mips/OP_UNUSED_C2FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C3FF.S b/vm/mterp/mips/OP_UNUSED_C3FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C4FF.S b/vm/mterp/mips/OP_UNUSED_C4FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C5FF.S b/vm/mterp/mips/OP_UNUSED_C5FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C6FF.S b/vm/mterp/mips/OP_UNUSED_C6FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C7FF.S b/vm/mterp/mips/OP_UNUSED_C7FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C8FF.S b/vm/mterp/mips/OP_UNUSED_C8FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_C9FF.S b/vm/mterp/mips/OP_UNUSED_C9FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_C9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CAFF.S b/vm/mterp/mips/OP_UNUSED_CAFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CBFF.S b/vm/mterp/mips/OP_UNUSED_CBFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CCFF.S b/vm/mterp/mips/OP_UNUSED_CCFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CCFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CDFF.S b/vm/mterp/mips/OP_UNUSED_CDFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CEFF.S b/vm/mterp/mips/OP_UNUSED_CEFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_CFFF.S b/vm/mterp/mips/OP_UNUSED_CFFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_CFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D0FF.S b/vm/mterp/mips/OP_UNUSED_D0FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D1FF.S b/vm/mterp/mips/OP_UNUSED_D1FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D2FF.S b/vm/mterp/mips/OP_UNUSED_D2FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D3FF.S b/vm/mterp/mips/OP_UNUSED_D3FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D4FF.S b/vm/mterp/mips/OP_UNUSED_D4FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D5FF.S b/vm/mterp/mips/OP_UNUSED_D5FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D6FF.S b/vm/mterp/mips/OP_UNUSED_D6FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D7FF.S b/vm/mterp/mips/OP_UNUSED_D7FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D8FF.S b/vm/mterp/mips/OP_UNUSED_D8FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_D9FF.S b/vm/mterp/mips/OP_UNUSED_D9FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_D9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DAFF.S b/vm/mterp/mips/OP_UNUSED_DAFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DBFF.S b/vm/mterp/mips/OP_UNUSED_DBFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DCFF.S b/vm/mterp/mips/OP_UNUSED_DCFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DCFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DDFF.S b/vm/mterp/mips/OP_UNUSED_DDFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DEFF.S b/vm/mterp/mips/OP_UNUSED_DEFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_DFFF.S b/vm/mterp/mips/OP_UNUSED_DFFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_DFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E0FF.S b/vm/mterp/mips/OP_UNUSED_E0FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E1FF.S b/vm/mterp/mips/OP_UNUSED_E1FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E2FF.S b/vm/mterp/mips/OP_UNUSED_E2FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E2FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E3.S b/vm/mterp/mips/OP_UNUSED_E3.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E3.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E3FF.S b/vm/mterp/mips/OP_UNUSED_E3FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E3FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E4.S b/vm/mterp/mips/OP_UNUSED_E4.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E4.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E4FF.S b/vm/mterp/mips/OP_UNUSED_E4FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E4FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E5.S b/vm/mterp/mips/OP_UNUSED_E5.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E5.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E5FF.S b/vm/mterp/mips/OP_UNUSED_E5FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E5FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E6.S b/vm/mterp/mips/OP_UNUSED_E6.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E6.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E6FF.S b/vm/mterp/mips/OP_UNUSED_E6FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E6FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E7.S b/vm/mterp/mips/OP_UNUSED_E7.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E7.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E7FF.S b/vm/mterp/mips/OP_UNUSED_E7FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E7FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E8.S b/vm/mterp/mips/OP_UNUSED_E8.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E8.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E8FF.S b/vm/mterp/mips/OP_UNUSED_E8FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E8FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E9.S b/vm/mterp/mips/OP_UNUSED_E9.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E9.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_E9FF.S b/vm/mterp/mips/OP_UNUSED_E9FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_E9FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EA.S b/vm/mterp/mips/OP_UNUSED_EA.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EA.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EAFF.S b/vm/mterp/mips/OP_UNUSED_EAFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EAFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EB.S b/vm/mterp/mips/OP_UNUSED_EB.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EB.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EBFF.S b/vm/mterp/mips/OP_UNUSED_EBFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EBFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EC.S b/vm/mterp/mips/OP_UNUSED_EC.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EC.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ECFF.S b/vm/mterp/mips/OP_UNUSED_ECFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ECFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_ED.S b/vm/mterp/mips/OP_UNUSED_ED.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_ED.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EDFF.S b/vm/mterp/mips/OP_UNUSED_EDFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EDFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EEFF.S b/vm/mterp/mips/OP_UNUSED_EEFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EEFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EF.S b/vm/mterp/mips/OP_UNUSED_EF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_EFFF.S b/vm/mterp/mips/OP_UNUSED_EFFF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_EFFF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_F0FF.S b/vm/mterp/mips/OP_UNUSED_F0FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_F0FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_F1.S b/vm/mterp/mips/OP_UNUSED_F1.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_F1.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_F1FF.S b/vm/mterp/mips/OP_UNUSED_F1FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_F1FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FC.S b/vm/mterp/mips/OP_UNUSED_FC.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FC.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FD.S b/vm/mterp/mips/OP_UNUSED_FD.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FD.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FE.S b/vm/mterp/mips/OP_UNUSED_FE.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FE.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_UNUSED_FF.S b/vm/mterp/mips/OP_UNUSED_FF.S
new file mode 100644
index 000000000..99ef3cf30
--- /dev/null
+++ b/vm/mterp/mips/OP_UNUSED_FF.S
@@ -0,0 +1 @@
+%include "mips/unused.S"
diff --git a/vm/mterp/mips/OP_USHR_INT.S b/vm/mterp/mips/OP_USHR_INT.S
new file mode 100644
index 000000000..7b474b6c8
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"preinstr":"and a1, a1, 31", "instr":"srl a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_USHR_INT_2ADDR.S b/vm/mterp/mips/OP_USHR_INT_2ADDR.S
new file mode 100644
index 000000000..71b5e36a5
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"preinstr":"and a1, a1, 31", "instr":"srl a0, a0, a1 "}
diff --git a/vm/mterp/mips/OP_USHR_INT_LIT8.S b/vm/mterp/mips/OP_USHR_INT_LIT8.S
new file mode 100644
index 000000000..7dbe86390
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"preinstr":"and a1, a1, 31", "instr":"srl a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_USHR_LONG.S b/vm/mterp/mips/OP_USHR_LONG.S
new file mode 100644
index 000000000..acd9d15e7
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_LONG.S
@@ -0,0 +1,32 @@
+%verify "executed"
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t0) # t0 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+ EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, zero, a2 # rhi<- 0 (if shift&0x20)
+
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_USHR_LONG_2ADDR.S b/vm/mterp/mips/OP_USHR_LONG_2ADDR.S
new file mode 100644
index 000000000..103cc983d
--- /dev/null
+++ b/vm/mterp/mips/OP_USHR_LONG_2ADDR.S
@@ -0,0 +1,27 @@
+%verify "executed"
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ GET_OPA4(t3) # t3 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t3, rFP, t3) # t3 <- &fp[A]
+ LOAD64(a0, a1, t3) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, zero, a2 # rhi<- 0 (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, t3) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
diff --git a/vm/mterp/mips/OP_XOR_INT.S b/vm/mterp/mips/OP_XOR_INT.S
new file mode 100644
index 000000000..6551e7583
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_INT_2ADDR.S b/vm/mterp/mips/OP_XOR_INT_2ADDR.S
new file mode 100644
index 000000000..f93b7826d
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binop2addr.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_INT_LIT16.S b/vm/mterp/mips/OP_XOR_INT_LIT16.S
new file mode 100644
index 000000000..add8ef2b5
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT_LIT16.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit16.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_INT_LIT8.S b/vm/mterp/mips/OP_XOR_INT_LIT8.S
new file mode 100644
index 000000000..31fa36069
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_INT_LIT8.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopLit8.S" {"instr":"xor a0, a0, a1"}
diff --git a/vm/mterp/mips/OP_XOR_LONG.S b/vm/mterp/mips/OP_XOR_LONG.S
new file mode 100644
index 000000000..1f07c8474
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_LONG.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/vm/mterp/mips/OP_XOR_LONG_2ADDR.S b/vm/mterp/mips/OP_XOR_LONG_2ADDR.S
new file mode 100644
index 000000000..dade7a9a6
--- /dev/null
+++ b/vm/mterp/mips/OP_XOR_LONG_2ADDR.S
@@ -0,0 +1,2 @@
+%verify "executed"
+%include "mips/binopWide2addr.S" {"preinstr":"xor a0, a0, a2", "instr":"xor a1, a1, a3"}
diff --git a/vm/mterp/mips/alt_stub.S b/vm/mterp/mips/alt_stub.S
new file mode 100644
index 000000000..edf71a7aa
--- /dev/null
+++ b/vm/mterp/mips/alt_stub.S
@@ -0,0 +1,20 @@
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (${opnum} * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
diff --git a/vm/mterp/mips/bincmp.S b/vm/mterp/mips/bincmp.S
new file mode 100644
index 000000000..e2398d0e0
--- /dev/null
+++ b/vm/mterp/mips/bincmp.S
@@ -0,0 +1,35 @@
+%verify "branch taken"
+%verify "branch not taken"
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ b${revcmp} a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(a1, 1) # a1<- branch offset, in code units
+ b 2f
+1:
+ li a1, 2 # a1 <- BYTE branch dist for not-taken
+2:
+ addu a2, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a2, 3f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+3:
+ bnez a0, common_updateProfile
+#else
+ bgez a2, 4f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/mips/binflop.S b/vm/mterp/mips/binflop.S
new file mode 100644
index 000000000..6b0270762
--- /dev/null
+++ b/vm/mterp/mips/binflop.S
@@ -0,0 +1,44 @@
+%default {"preinstr":"", "chkzero":"0"}
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+#ifdef SOFT_FLOAT
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa1, a3) # a1 <- vCC
+ GET_VREG_F(fa0, a2) # a0 <- vBB
+
+ .if $chkzero
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1 # condition bit and comparison with 0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $preinstr # optional op
+#ifdef SOFT_FLOAT
+ $instr # v0 = result
+ SET_VREG(v0, rOBJ) # vAA <- v0
+#else
+ $instr_f # f0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 11-14 instructions */
+
diff --git a/vm/mterp/mips/binflop2addr.S b/vm/mterp/mips/binflop2addr.S
new file mode 100644
index 000000000..c20a1c636
--- /dev/null
+++ b/vm/mterp/mips/binflop2addr.S
@@ -0,0 +1,45 @@
+%default {"preinstr":"", "chkzero":"0"}
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" and
+ * "instr_f" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ .if $chkzero
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+#ifdef SOFT_FLOAT
+ $instr # result <- op, a0-a3 changed
+ SET_VREG(v0, rOBJ) # vAA <- result
+#else
+ $instr_f
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-13 instructions */
+
diff --git a/vm/mterp/mips/binflopWide.S b/vm/mterp/mips/binflopWide.S
new file mode 100644
index 000000000..ad6168044
--- /dev/null
+++ b/vm/mterp/mips/binflopWide.S
@@ -0,0 +1,52 @@
+%default {"preinstr":"", "chkzero":"0"}
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, t1) # a2/a3 <- vCC/vCC+1
+ .if $chkzero
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, a2)
+ LOAD64_F(fa1, fa1f, t1)
+ .if $chkzero
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $preinstr # optional op
+#ifdef SOFT_FLOAT
+ $instr # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ $instr_f
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
diff --git a/vm/mterp/mips/binflopWide2addr.S b/vm/mterp/mips/binflopWide2addr.S
new file mode 100644
index 000000000..aacd4825c
--- /dev/null
+++ b/vm/mterp/mips/binflopWide2addr.S
@@ -0,0 +1,46 @@
+%default {"preinstr":"", "chkzero":"0"}
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG2, rARG3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if $chkzero
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, rOBJ)
+ LOAD64_F(fa1, fa1f, a1)
+ .if $chkzero
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+#ifdef SOFT_FLOAT
+ $instr # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ $instr_f
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
diff --git a/vm/mterp/mips/binop.S b/vm/mterp/mips/binop.S
new file mode 100644
index 000000000..8bbe0fb8e
--- /dev/null
+++ b/vm/mterp/mips/binop.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 11-14 instructions */
+
diff --git a/vm/mterp/mips/binop2addr.S b/vm/mterp/mips/binop2addr.S
new file mode 100644
index 000000000..acca20d66
--- /dev/null
+++ b/vm/mterp/mips/binop2addr.S
@@ -0,0 +1,30 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 10-13 instructions */
+
diff --git a/vm/mterp/mips/binopLit16.S b/vm/mterp/mips/binopLit16.S
new file mode 100644
index 000000000..74b453374
--- /dev/null
+++ b/vm/mterp/mips/binopLit16.S
@@ -0,0 +1,30 @@
+%default {"result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit16" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+ * rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+ */
+ # binop/lit16 vA, vB, /* +CCCC */
+ FETCH_S(a1, 1) # a1 <- ssssCCCC (sign-extended)
+ GET_OPB(a2) # a2 <- B
+ GET_OPA(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a2) # a0 <- vB
+ and rOBJ, rOBJ, 15
+ .if $chkzero
+ # cmp a1, 0; is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 10-13 instructions */
+
diff --git a/vm/mterp/mips/binopLit8.S b/vm/mterp/mips/binopLit8.S
new file mode 100644
index 000000000..c3d7464eb
--- /dev/null
+++ b/vm/mterp/mips/binopLit8.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "result":"a0", "chkzero":"0"}
+ /*
+ * Generic 32-bit "lit8" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+ * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+ * shl-int/lit8, shr-int/lit8, ushr-int/lit8
+ */
+ # binop/lit8 vAA, vBB, /* +CC */
+ FETCH_S(a3, 1) # a3 <- ssssCCBB (sign-extended for CC)
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a3, 255 # a2 <- BB
+ GET_VREG(a0, a2) # a0 <- vBB
+ sra a1, a3, 8 # a1 <- ssssssCC (sign extended)
+ .if $chkzero
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # $result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO($result, rOBJ, t0) # vAA <- $result
+ /* 10-12 instructions */
+
diff --git a/vm/mterp/mips/binopWide.S b/vm/mterp/mips/binopWide.S
new file mode 100644
index 000000000..3e47ab924
--- /dev/null
+++ b/vm/mterp/mips/binopWide.S
@@ -0,0 +1,38 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+ LOAD64($arg0, $arg1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64($arg2, $arg3, t1) # a2/a3 <- vCC/vCC+1
+ .if $chkzero
+ or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64($result0, $result1, rOBJ) # vAA/vAA+1 <- $result0/$result1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
diff --git a/vm/mterp/mips/binopWide2addr.S b/vm/mterp/mips/binopWide2addr.S
new file mode 100644
index 000000000..7494604f8
--- /dev/null
+++ b/vm/mterp/mips/binopWide2addr.S
@@ -0,0 +1,34 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1", "chkzero":"0", "arg0":"a0", "arg1":"a1", "arg2":"a2", "arg3":"a3"}
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64($arg2, $arg3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64($arg0, $arg1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if $chkzero
+ or t0, $arg2, $arg3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64($result0, $result1, rOBJ) # vAA/vAA+1 <- $result0/$result1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
diff --git a/vm/mterp/mips/debug.cpp b/vm/mterp/mips/debug.cpp
new file mode 100644
index 000000000..0de6b67f1
--- /dev/null
+++ b/vm/mterp/mips/debug.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose MIPS registers, along with some other info.
+ *
+ */
+void dvmMterpDumpMipsRegs(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
+{
+ register uint32_t rPC asm("s0");
+ register uint32_t rFP asm("s1");
+ register uint32_t rSELF asm("s2");
+ register uint32_t rIBASE asm("s3");
+ register uint32_t rINST asm("s4");
+ register uint32_t rOBJ asm("s5");
+ register uint32_t rBIX asm("s6");
+ register uint32_t rTEMP asm("s7");
+
+ //extern char dvmAsmInstructionStart[];
+
+ printf("REGS: a0=%08x a1=%08x a2=%08x a3=%08x\n", a0, a1, a2, a3);
+ printf(" : rPC=%08x rFP=%08x rSELF=%08x rIBASE=%08x\n",
+ rPC, rFP, rSELF, rIBASE);
+ printf(" : rINST=%08x rOBJ=%08x rBIX=%08x rTEMP=%08x \n", rINST, rOBJ, rBIX, rTEMP);
+
+ //Thread* self = (Thread*) rSELF;
+ //const Method* method = self->method;
+ printf(" + self is %p\n", dvmThreadSelf());
+ //printf(" + currently in %s.%s %s\n",
+ // method->clazz->descriptor, method->name, method->signature);
+ //printf(" + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+ //printf(" + next handler for 0x%02x = %p\n",
+ // rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+ StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+ printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+ printf(" prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+ saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc);
+#else
+ printf(" prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+ saveArea->prevFrame, saveArea->savedPc,
+ saveArea->method, saveArea->xtra.currentPc,
+ *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+ /*
+ * It is a direct (non-virtual) method if it is static, private,
+ * or a constructor.
+ */
+ bool isDirect =
+ ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+ (method->name[0] == '<');
+
+ char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+ printf("<%c:%s.%s %s> ",
+ isDirect ? 'D' : 'V',
+ method->clazz->descriptor,
+ method->name,
+ desc);
+
+ free(desc);
+}
diff --git a/vm/mterp/mips/entry.S b/vm/mterp/mips/entry.S
new file mode 100644
index 000000000..8a1b61a9e
--- /dev/null
+++ b/vm/mterp/mips/entry.S
@@ -0,0 +1,107 @@
+
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+#define ASSIST_DEBUGGER 1
+
+ .text
+ .align 2
+ .global dvmMterpStdRun
+ .ent dvmMterpStdRun
+ .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ * a0 Thread* self
+ *
+ * The return comes via a call to dvmMterpStdBail().
+ */
+
+dvmMterpStdRun:
+ .set noreorder
+ .cpload t9
+ .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+ STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+ .cprestore STACK_OFFSET_GP
+
+ addu fp, sp, STACK_SIZE # Move Frame Pointer to the base of frame
+ /* save stack pointer, add magic word for debuggerd */
+ sw sp, offThread_bailPtr(a0) # Save SP
+
+ /* set up "named" registers, figure out entry point */
+ move rSELF, a0 # set rSELF
+ LOAD_PC_FROM_SELF()
+ LOAD_FP_FROM_SELF()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+
+#if defined(WITH_JIT)
+.LentryInstr:
+ /* Entry is always a possible trace start */
+ lw a0, offThread_pJitProfTable(rSELF)
+ FETCH_INST() # load rINST from rPC
+ sw zero, offThread_inJitCodeCache(rSELF)
+#if !defined(WITH_SELF_VERIFICATION)
+ bnez a0, common_updateProfile # profiling is enabled
+#else
+ lw a2, offThread_shadowSpace(rSELF) # to find out the jit exit state
+ beqz a0, 1f # profiling is disabled
+ lw a3, offShadowSpace_jitExitState(a2) # jit exit state
+ li t0, kSVSTraceSelect
+ bne a3, t0, 2f
+ li a2, kJitTSelectRequestHot # ask for trace selection
+ b common_selectTrace # go build the trace
+2:
+ li a4, kSVSNoProfile
+ beq a3, a4, 1f # don't profile the next instruction?
+ b common_updateProfile # collect profiles
+#endif
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ /* start executing the instruction at rPC */
+ FETCH_INST() # load rINST from rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+.Lbad_arg:
+ la a0, .LstrBadEntryPoint
+ #a1 holds value of entryPoint
+ JAL(printf)
+ JAL(dvmAbort)
+
+ .end dvmMterpStdRun
+
+ .global dvmMterpStdBail
+ .ent dvmMterpStdBail
+
+/* Restore the stack pointer and all the registers stored at sp from the save
+ * point established on entry. Return to whoever called dvmMterpStdRun.
+ *
+ * On entry:
+ * a0 Thread* self
+ */
+dvmMterpStdBail:
+ lw sp, offThread_bailPtr(a0) # Restore sp
+ STACK_LOAD_FULL()
+ jr ra
+
+ .end dvmMterpStdBail
diff --git a/vm/mterp/mips/footer.S b/vm/mterp/mips/footer.S
new file mode 100644
index 000000000..b5b53b7f7
--- /dev/null
+++ b/vm/mterp/mips/footer.S
@@ -0,0 +1,1205 @@
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * "longjmp" to a translation after single-stepping. Before returning
+ * to translation, must save state for self-verification.
+ */
+ .global dvmJitResumeTranslation # (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+ move rSELF, a0 # restore self
+ move rPC, a1 # restore Dalvik pc
+ move rFP, a2 # restore Dalvik fp
+ lw rBIX, offThread_jitResumeNPC(rSELF)
+ sw zero, offThread_jitResumeNPC(rSELF) # reset resume address
+ lw sp, offThread_jitResumeNSP(rSELF) # cut back native stack
+ b jitSVShadowRunStart # resume as if cache hit
+ # expects resume addr in rBIX
+
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ li a2, kSVSPunt # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ move rPC, a0 # set up dalvik pc
+ EXPORT_PC()
+ sw ra, offThread_jitResumeNPC(rSELF)
+ sw a1, offThread_jitResumeDPC(rSELF)
+ li a2, kSVSSingleStep # a2 <- interpreter entry point
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+ move a0, rPC # pass our target PC
+ li a2, kSVSNoProfile # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ move a0, rPC # pass our target PC
+ li a2, kSVSTraceSelect # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ lw a0, 0(ra) # pass our target PC
+ li a2, kSVSTraceSelect # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+ lw a0, 0(ra) # pass our target PC
+ li a2, kSVSBackwardBranch # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ lw a0, 0(ra) # pass our target PC
+ li a2, kSVSNormal # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ move a0, rPC # pass our target PC
+ li a2, kSVSNoChain # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+#else /* WITH_SELF_VERIFICATION */
+
+
+/*
+ * "longjmp" to a translation after single-stepping.
+ */
+ .global dvmJitResumeTranslation # (Thread* self, u4* dPC, u4* dFP) -- NOTE(review): three values are restored below; original comment listed only two args, confirm against the C prototype
+dvmJitResumeTranslation:
+ move rSELF, a0 # restore self
+ move rPC, a1 # restore Dalvik pc
+ move rFP, a2 # restore Dalvik fp
+ lw a0, offThread_jitResumeNPC(rSELF) # a0 <- saved native resume address
+ sw zero, offThread_jitResumeNPC(rSELF) # reset resume address
+ lw sp, offThread_jitResumeNSP(rSELF) # cut back native stack
+ jr a0 # resume translation
+
+
+/*
+ * Return from the translation cache to the interpreter when the compiler is
+ * having issues translating/executing a Dalvik instruction. We have to skip
+ * the code cache lookup otherwise it is possible to indefinitely bounce
+ * between the interpreter and the code cache if the instruction that fails
+ * to be compiled happens to be at a trace start.
+ */
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ lw gp, STACK_OFFSET_GP(sp)
+ move rPC, a0
+#if defined(WITH_JIT_TUNING)
+ move a0, ra
+ JAL(dvmBumpPunt)
+#endif
+ EXPORT_PC()
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+/*
+ * Return to the interpreter to handle a single instruction.
+ * On entry:
+ * rPC <= Dalvik PC of instruction to interpret
+ * a1 <= Dalvik PC of resume instruction
+ * ra <= resume point in translation
+ */
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ lw gp, STACK_OFFSET_GP(sp)
+ move rPC, a0 # set up dalvik pc
+ EXPORT_PC()
+ sw ra, offThread_jitResumeNPC(rSELF)
+ sw sp, offThread_jitResumeNSP(rSELF)
+ sw a1, offThread_jitResumeDPC(rSELF)
+ li a1, 1
+ sw a1, offThread_singleStepCount(rSELF) # just step once
+ move a0, rSELF
+ li a1, kSubModeCountedStep
+ JAL(dvmEnableSubMode) # (self, subMode)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used for callees.
+ */
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ lw gp, STACK_OFFSET_GP(sp)
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNoChain)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ beqz a0, 2f # 0 means translation does not exist
+ jr a0
+
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used following
+ * invokes.
+ */
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ lw gp, STACK_OFFSET_GP(sp)
+ lw rPC, (ra) # get our target PC
+ subu rINST, ra, 8 # save start of chain branch
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # @ (pc, self)
+ sw v0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ beqz v0, 2f
+ move a0, v0
+ move a1, rINST
+ JAL(dvmJitChain) # v0 <- dvmJitChain(codeAddr, chainAddr)
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ move a0, v0
+ beqz a0, toInterpreter # didn't chain - resume with interpreter
+
+ jr a0 # continue native execution
+
+/* No translation, so request one if profiling isn't disabled */
+2:
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ lw a0, offThread_pJitProfTable(rSELF)
+ FETCH_INST()
+ li t0, kJitTSelectRequestHot
+ movn a2, t0, a0 # ask for trace selection
+ bnez a0, common_selectTrace
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+/*
+ * Return from the translation cache to the interpreter.
+ * The return was done with a jump-and-link, and the 32-bit word
+ * following the call site contains the target rPC value; ra
+ * therefore points at that word (see "lw rPC, (ra)" below)
+ * rather than at a real return address.
+ *
+ * We'll need to stash our lr origin away, recover the new
+ * target and then check to see if there is a translation available
+ * for our new target. If so, we do a translation chain and
+ * go back to native execution. Otherwise, it's back to the
+ * interpreter (after treating this entry as a potential
+ * trace start).
+ */
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ lw gp, STACK_OFFSET_GP(sp)
+ lw rPC, (ra) # get our target PC
+ subu rINST, ra, 8 # save start of chain branch
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNormal)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # @ (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ beqz a0, toInterpreter # go if not, otherwise do chain
+ move a1, rINST
+ JAL(dvmJitChain) # v0 <- dvmJitChain(codeAddr, chainAddr)
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ move a0, v0
+ beqz a0, toInterpreter # didn't chain - resume with interpreter
+
+ jr a0 # continue native execution
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNoChain)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ beqz a0, footer235
+
+ jr a0 # continue native execution if so
+footer235:
+ EXPORT_PC()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ lw gp, STACK_OFFSET_GP(sp)
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNoChain)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ beqz a0, 1f
+ jr a0 # continue native execution if so
+1:
+#endif /* WITH_SELF_VERIFICATION */
+
+/*
+ * No translation, restore interpreter regs and start interpreting.
+ * rSELF & rFP were preserved in the translated code, and rPC has
+ * already been restored by the time we get here. We'll need to set
+ * up rIBASE & rINST, and load pJitProfTable into a0.
+ */
+
+toInterpreter:
+ EXPORT_PC()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ lw a0, offThread_pJitProfTable(rSELF)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # NOTE(review): redundant reload; rIBASE was already loaded four lines up
+ # NOTE: intended fallthrough
+
+/*
+ * Similar to common_updateProfile, but tests for null pJitProfTable.
+ * a0 holds pJitProfTable, rINST is loaded, rPC is current and
+ * rIBASE has been recently refreshed.
+ */
+
+common_testUpdateProfile:
+
+ beqz a0, 4f # profiling disabled: skip profiling, dispatch at 4f
+
+/*
+ * Common code to update potential trace start counter, and initiate
+ * a trace-build if appropriate.
+ * On entry here:
+ * a0 <= pJitProfTable (verified non-NULL)
+ * rPC <= Dalvik PC
+ * rINST <= next instruction
+ */
+common_updateProfile:
+ srl a3, rPC, 12 # cheap, but fast hash function
+ xor a3, a3, rPC
+ andi a3, a3, JIT_PROF_SIZE-1 # eliminate excess bits
+ addu t1, a0, a3
+ lbu a1, (t1) # get counter
+ GET_INST_OPCODE(t0)
+ subu a1, a1, 1 # decrement counter
+ sb a1, (t1) # and store it
+ beqz a1, 1f
+ GOTO_OPCODE(t0) # if not threshold, fallthrough otherwise
+1:
+ /* Looks good, reset the counter */
+ lw a1, offThread_jitThreshold(rSELF)
+ sb a1, (t1)
+ EXPORT_PC()
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw v0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+
+#if !defined(WITH_SELF_VERIFICATION)
+ li t0, kJitTSelectRequest # ask for trace selection
+ movz a2, t0, a0
+ beqz a0, common_selectTrace
+ jr a0 # jump to the translation
+#else
+
+ bne a0, zero, skip_ask_for_trace_selection
+ li a2, kJitTSelectRequest # ask for trace selection
+ j common_selectTrace
+
+skip_ask_for_trace_selection:
+ /*
+ * At this point, we have a target translation. However, if
+ * that translation is actually the interpret-only pseudo-translation
+ * we want to treat it the same as no translation.
+ */
+ move rBIX, a0 # save target
+ jal dvmCompilerGetInterpretTemplate
+ # special case?
+ bne v0, rBIX, jitSVShadowRunStart # set up self verification shadow space
+ # Need to clear the inJitCodeCache flag
+ sw zero, offThread_inJitCodeCache(rSELF) # back to the interp land
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+ /* no return */
+#endif
+
+/*
+ * On entry:
+ * a2 is jit state.
+ */
+
+common_selectTrace:
+ lhu a0, offThread_subMode(rSELF)
+ andi a0, (kSubModeJitTraceBuild | kSubModeJitSV)
+ bnez a0, 3f # already doing JIT work, continue
+ sw a2, offThread_jitState(rSELF)
+ move a0, rSELF
+
+/*
+ * Call out to validate trace-building request. If successful,
+ * rIBASE will be swapped to send us into single-stepping trace
+ * building mode, so we need to refresh before we continue.
+ */
+
+ EXPORT_PC()
+ SAVE_PC_TO_SELF()
+ SAVE_FP_TO_SELF()
+ JAL(dvmJitCheckTraceRequest)
+3:
+ FETCH_INST()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+4:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0)
+ /* no return */
+#endif
+
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ * On entry:
+ * rPC, rFP, rSELF: the values that they should contain
+ * rBIX: the address of the target translation.
+ */
+jitSVShadowRunStart:
+ move a0, rPC # a0 <- program counter
+ move a1, rFP # a1 <- frame pointer
+ move a2, rSELF # a2 <- self (Thread) pointer
+ move a3, rBIX # a3 <- target translation
+ jal dvmSelfVerificationSaveState # save registers to shadow space
+ lw rFP, offShadowSpace_shadowFP(v0) # rFP <- fp in shadow space
+ jr rBIX # jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+jitSVShadowRunEnd:
+ move a1, rFP # pass ending fp
+ move a3, rSELF # pass self ptr for convenience
+ jal dvmSelfVerificationRestoreState # restore pc and fp values
+ LOAD_PC_FP_FROM_SELF() # restore pc, fp
+ lw a1, offShadowSpace_svState(a0) # get self verification state
+ beq a1, zero, 1f # check for punt condition
+
+ # Setup SV single-stepping
+ move a0, rSELF
+ li a1, kSubModeJitSV
+ JAL(dvmEnableSubMode) # (self, subMode)
+ li a2, kJitSelfVerification # ask for self verification
+ sw a2, offThread_jitState(rSELF)
+ # Intentional fallthrough
+
+1:
+ # exit to interpreter without check
+ EXPORT_PC()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+#endif
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ * It will end this interpreter activation, and return to the caller
+ * of dvmMterpStdRun.
+ *
+ * State registers will be saved to the "thread" area before bailing
+ * for debugging purposes
+ */
+ .ent common_gotoBail
+common_gotoBail:
+ SAVE_PC_FP_TO_SELF() # export state to "thread"
+ move a0, rSELF # a0 <- self ptr
+ b dvmMterpStdBail # call(self, changeInterp)
+ .end common_gotoBail
+
+/*
+ * The JIT's invoke method needs to remember the callsite class and
+ * target pair. Save them here so that they are available to
+ * dvmCheckJit following the interpretation of this invoke.
+ */
+#if defined(WITH_JIT)
+save_callsiteinfo:
+ beqz rOBJ, 1f
+ lw rOBJ, offObject_clazz(rOBJ)
+1:
+ sw a0, offThread_methodToCall(rSELF)
+ sw rOBJ, offThread_callsiteClass(rSELF)
+ jr ra
+#endif
+
+/*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ */
+common_invokeMethodJumboNoThis:
+#if defined(WITH_JIT)
+ /* On entry: a0 is "Method* methodToCall" */
+ li rOBJ, 0 # clear "this"
+#endif
+common_invokeMethodJumbo:
+ /* On entry: a0 is "Method* methodToCall, rOBJ is "this" */
+.LinvokeNewJumbo:
+#if defined(WITH_JIT)
+ lhu a1, offThread_subMode(rSELF)
+ andi a1, kSubModeJitTraceBuild
+ beqz a1, 1f
+ JAL(save_callsiteinfo)
+#endif
+/* prepare to copy args to "outs" area of current frame */
+1:
+ add rPC, rPC, 4 # adjust pc to make return consistent
+ FETCH(a2, 1)
+ SAVEAREA_FROM_FP(rBIX, rFP) # rBIX <- stack save area
+ beqz a2, .LinvokeArgsDone # if no args, skip the rest
+ FETCH(a1, 2) # a1 <- CCCC
+ b .LinvokeRangeArgs # handle args like invoke range
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ * a0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+#if defined(WITH_JIT)
+ lhu a1, offThread_subMode(rSELF)
+ andi a1, kSubModeJitTraceBuild
+ beqz a1, 1f
+ JAL(save_callsiteinfo)
+#endif
+ # prepare to copy args to "outs" area of current frame
+1:
+ GET_OPA(a2)
+ SAVEAREA_FROM_FP(rBIX, rFP) # rBIX <- stack save area
+ beqz a2, .LinvokeArgsDone
+ FETCH(a1, 2) # a1 <- CCCC
+.LinvokeRangeArgs:
+ # a0=methodToCall, a1=CCCC, a2=count, rBIX=outs
+ # (very few methods have > 10 args; could unroll for common cases)
+ EAS2(a3, rFP, a1)
+ sll t0, a2, 2
+ subu rBIX, rBIX, t0
+
+1:
+ lw a1, 0(a3)
+ addu a3, a3, 4
+ subu a2, a2, 1
+ sw a1, 0(rBIX)
+ addu rBIX, 4
+ bnez a2, 1b
+ b .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ * a0 is "Method* methodToCall", "rOBJ is this"
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+#if defined(WITH_JIT)
+ lhu a1, offThread_subMode(rSELF)
+ andi a1, kSubModeJitTraceBuild
+ beqz a1, 1f
+ JAL(save_callsiteinfo)
+#endif
+
+ # prepare to copy args to "outs" area of current frame
+1:
+ GET_OPB(a2)
+ SAVEAREA_FROM_FP(rBIX, rFP)
+ beqz a2, .LinvokeArgsDone
+ FETCH(a1, 2)
+
+ # a0=methodToCall, a1=GFED, a2=count,
+.LinvokeNonRange:
+ beq a2, 0, 0f
+ beq a2, 1, 1f
+ beq a2, 2, 2f
+ beq a2, 3, 3f
+ beq a2, 4, 4f
+ beq a2, 5, 5f
+
+5:
+ and t0, rINST, 0x0f00
+ ESRN(t2, rFP, t0, 6)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+4:
+ and t0, a1, 0xf000
+ ESRN(t2, rFP, t0, 10)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+3:
+ and t0, a1, 0x0f00
+ ESRN(t2, rFP, t0, 6)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+2:
+ and t0, a1, 0x00f0
+ ESRN(t2, rFP, t0, 2)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+1:
+ and t0, a1, 0x000f
+ EASN(t2, rFP, t0, 2)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+0:
+ #fall through .LinvokeArgsDone
+
+
+.LinvokeArgsDone: # a0=methodToCall
+ lhu rOBJ, offMethod_registersSize(a0)
+ lhu a3, offMethod_outsSize(a0)
+ lw a2, offMethod_insns(a0)
+ lw rINST, offMethod_clazz(a0)
+ # find space for the new stack frame, check for overflow
+ SAVEAREA_FROM_FP(a1, rFP) # a1 <- stack save area
+ sll t0, rOBJ, 2 # a1 <- newFp (old savearea - regsSize)
+ subu a1, a1, t0
+ SAVEAREA_FROM_FP(rBIX, a1)
+ lw rOBJ, offThread_interpStackEnd(rSELF) # t3 <- interpStackEnd
+ sll t2, a3, 2
+ subu t0, rBIX, t2
+ lhu ra, offThread_subMode(rSELF)
+ lw a3, offMethod_accessFlags(a0) # a3 <- methodToCall->accessFlags
+ bltu t0, rOBJ, .LstackOverflow # yes, this frame will overflow stack
+
+
+ # set up newSaveArea
+#ifdef EASY_GDB
+ SAVEAREA_FROM_FP(t0, rFP)
+ sw t0, offStackSaveArea_prevSave(rBIX)
+#endif
+ sw rFP, (offStackSaveArea_prevFrame)(rBIX)
+ sw rPC, (offStackSaveArea_savedPc)(rBIX)
+#if defined(WITH_JIT)
+ sw zero, (offStackSaveArea_returnAddr)(rBIX)
+#endif
+ sw a0, (offStackSaveArea_method)(rBIX)
+ # Profiling?
+ bnez ra, 2f
+1:
+ and t2, a3, ACC_NATIVE
+ bnez t2, .LinvokeNative
+ lhu rOBJ, (a2) # rOBJ -< load Inst from New PC
+ lw a3, offClassObject_pDvmDex(rINST)
+ move rPC, a2 # Publish new rPC
+ # Update state values for the new method
+ # a0=methodToCall, a1=newFp, a3=newMethodClass, rOBJ=newINST
+ sw a0, offThread_method(rSELF)
+ sw a3, offThread_methodClassDex(rSELF)
+ li a2, 1
+ sw a2, offThread_debugIsMethodEntry(rSELF)
+
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ move rFP, a1 # fp = newFp
+ GET_PREFETCHED_OPCODE(t0, rOBJ) # extract prefetched opcode from rOBJ
+ move rINST, rOBJ # publish new rINST
+ sw a1, offThread_curFrame(rSELF)
+ bnez a0, common_updateProfile
+ GOTO_OPCODE(t0)
+#else
+ move rFP, a1
+ GET_PREFETCHED_OPCODE(t0, rOBJ)
+ move rINST, rOBJ
+ sw a1, offThread_curFrame(rSELF)
+ GOTO_OPCODE(t0)
+#endif
+
+2:
+ # Profiling - record method entry. a0: methodToCall
+ STACK_STORE(a0, 0)
+ STACK_STORE(a1, 4)
+ STACK_STORE(a2, 8)
+ STACK_STORE(a3, 12)
+ sw rPC, offThread_pc(rSELF) # update interpSave.pc
+ move a1, a0
+ move a0, rSELF
+ JAL(dvmReportInvoke)
+ STACK_LOAD(a3, 12) # restore a0-a3
+ STACK_LOAD(a2, 8)
+ STACK_LOAD(a1, 4)
+ STACK_LOAD(a0, 0)
+ b 1b
+.LinvokeNative:
+ # Prep for the native call
+ # a0=methodToCall, a1=newFp, rBIX=newSaveArea
+ lhu ra, offThread_subMode(rSELF)
+ lw t3, offThread_jniLocal_topCookie(rSELF)
+ sw a1, offThread_curFrame(rSELF)
+ sw t3, offStackSaveArea_localRefCookie(rBIX) # newFp->localRefCookie=top
+ move a2, a0
+ move a0, a1
+ addu a1, rSELF, offThread_retval
+ move a3, rSELF
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ b .Lskip
+ .ent dalvik_mterp
+dalvik_mterp:
+ STACK_STORE_FULL()
+.Lskip:
+#endif
+ bnez ra, 11f # Any special SubModes active?
+ lw t9, offMethod_nativeFunc(a2)
+ jalr t9
+ lw gp, STACK_OFFSET_GP(sp)
+7:
+ # native return; rBIX=newSaveArea
+ # equivalent to dvmPopJniLocals
+ lw a0, offStackSaveArea_localRefCookie(rBIX)
+ lw a1, offThread_exception(rSELF)
+ sw rFP, offThread_curFrame(rSELF)
+ sw a0, offThread_jniLocal_topCookie(rSELF) # new top <- old top
+ bnez a1, common_exceptionThrown
+
+ FETCH_ADVANCE_INST(3)
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+11:
+ # a0=newFp, a1=&retval, a2=methodToCall, a3=self, ra=subModes
+ SCRATCH_STORE(a0, 0)
+ SCRATCH_STORE(a1, 4)
+ SCRATCH_STORE(a2, 8)
+ SCRATCH_STORE(a3, 12)
+ move a0, a2 # a0 <- methodToCall
+ move a1, rSELF
+ move a2, rFP
+ JAL(dvmReportPreNativeInvoke) # (methodToCall, self, fp)
+ SCRATCH_LOAD(a3, 12) # restore a0-a3
+ SCRATCH_LOAD(a2, 8)
+ SCRATCH_LOAD(a1, 4)
+ SCRATCH_LOAD(a0, 0)
+
+ # Call the native method
+ lw t9, offMethod_nativeFunc(a2) # t9<-methodToCall->nativeFunc
+ jalr t9
+ lw gp, STACK_OFFSET_GP(sp)
+
+ # Restore the pre-call arguments
+ SCRATCH_LOAD(a3, 12) # restore a0-a3
+ SCRATCH_LOAD(a2, 8)
+ SCRATCH_LOAD(a1, 4)
+ SCRATCH_LOAD(a0, 0)
+
+ # Finish up any post-invoke subMode requirements
+ move a0, a2
+ move a1, rSELF
+ move a2, rFP
+ JAL(dvmReportPostNativeInvoke) # (methodToCall, self, fp)
+ b 7b
+
+
+.LstackOverflow: # a0=methodToCall
+ move a1, a0 # a1 <- methodToCall
+ move a0, rSELF # a0 <- self
+ JAL(dvmHandleStackOverflow) # dvmHandleStackOverflow(self, methodToCall)
+ b common_exceptionThrown
+#ifdef ASSIST_DEBUGGER
+ .end dalvik_mterp
+#endif
+
+ /*
+ * Common code for method invocation, calling through "glue code".
+ *
+ * TODO: now that we have range and non-range invoke handlers, this
+ * needs to be split into two. Maybe just create entry points
+ * that set r9 and jump here?
+ *
+ * On entry:
+ * r0 is "Method* methodToCall", the method we're trying to call
+ * r9 is "bool methodCallRange", indicating if this is a /range variant
+ */
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+ lhu t0, offThread_subMode(rSELF)
+ SAVEAREA_FROM_FP(a0, rFP)
+ lw rOBJ, offStackSaveArea_savedPc(a0) # rOBJ = saveArea->savedPc
+ bnez t0, 19f
+14:
+ lw rFP, offStackSaveArea_prevFrame(a0) # fp = saveArea->prevFrame
+ lw a2, (offStackSaveArea_method - sizeofStackSaveArea)(rFP)
+ # a2<- method we're returning to
+ # is this a break frame?
+ beqz a2, common_gotoBail # break frame, bail out completely
+
+ lw rBIX, offMethod_clazz(a2) # rBIX<- method->clazz
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+ PREFETCH_ADVANCE_INST(rINST, rOBJ, 3) # advance rOBJ, update new rINST
+ sw a2, offThread_method(rSELF) # self->method = newSave->method
+ lw a1, offClassObject_pDvmDex(rBIX) # r1<- method->clazz->pDvmDex
+ sw rFP, offThread_curFrame(rSELF) # curFrame = fp
+#if defined(WITH_JIT)
+ lw rBIX, offStackSaveArea_returnAddr(a0)
+ move rPC, rOBJ # publish new rPC
+ sw a1, offThread_methodClassDex(rSELF)
+ sw rBIX, offThread_inJitCodeCache(rSELF) # may return to JIT'ed land
+ beqz rBIX, 15f # caller is compiled code
+ move t9, rBIX
+ jalr t9
+ lw gp, STACK_OFFSET_GP(sp)
+15:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ move rPC, rOBJ # publish new rPC
+ sw a1, offThread_methodClassDex(rSELF)
+ GOTO_OPCODE(t0)
+#endif
+
+19:
+ # Handle special actions
+ # On entry, a0: StackSaveArea
+ lw a1, offStackSaveArea_prevFrame(a0) # a1<- prevFP
+ sw rPC, offThread_pc(rSELF) # update interpSave.pc
+ sw a1, offThread_curFrame(rSELF) # update interpSave.curFrame
+ move a0, rSELF
+ JAL(dvmReportReturn)
+ SAVEAREA_FROM_FP(a0, rFP) # restore StackSaveArea
+ b 14b
+
+ .if 0
+ /*
+ * Return handling, calls through "glue code".
+ */
+.LreturnOld:
+ SAVE_PC_FP_TO_SELF() # export state
+ move a0, rSELF # arg to function
+ JAL(dvmMterp_returnFromMethod)
+ b common_resumeAfterGlueCall
+ .endif
+
+/*
+ * Somebody has thrown an exception. Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+ .global dvmMterpCommonExceptionThrown
+dvmMterpCommonExceptionThrown:
+common_exceptionThrown:
+.LexceptionNew:
+
+ EXPORT_PC()
+ move a0, rSELF
+ JAL(dvmCheckSuspendPending)
+ lw rOBJ, offThread_exception(rSELF)
+ move a1, rSELF
+ move a0, rOBJ
+ JAL(dvmAddTrackedAlloc)
+ lhu a2, offThread_subMode(rSELF)
+ sw zero, offThread_exception(rSELF)
+
+ # Special subMode?
+ bnez a2, 7f # any special subMode handling needed?
+8:
+ /* set up args and a local for "&fp" */
+ sw rFP, 20(sp) # store rFP => tmp
+ addu t0, sp, 20 # compute &tmp
+ sw t0, STACK_OFFSET_ARG04(sp) # save it in arg4 as per ABI
+ li a3, 0 # a3 <- false
+ lw a1, offThread_method(rSELF)
+ move a0, rSELF
+ lw a1, offMethod_insns(a1)
+ lhu ra, offThread_subMode(rSELF)
+ move a2, rOBJ
+ subu a1, rPC, a1
+ sra a1, a1, 1
+
+ /* call, r0 gets catchRelPc (a code-unit offset) */
+ JAL(dvmFindCatchBlock) # call(self, relPc, exc, scan?, &fp)
+ lw rFP, 20(sp) # retrieve the updated rFP
+
+ /* update frame pointer and check result from dvmFindCatchBlock */
+ move a0, v0
+ bltz v0, .LnotCaughtLocally
+
+ /* fix earlier stack overflow if necessary; Preserve a0 */
+ lbu a1, offThread_stackOverflowed(rSELF)
+ beqz a1, 1f
+ move rBIX, a0
+ move a0, rSELF
+ move a1, rOBJ
+ JAL(dvmCleanupStackOverflow)
+ move a0, rBIX
+
+1:
+
+/* adjust locals to match self->interpSave.curFrame and updated PC */
+ SAVEAREA_FROM_FP(a1, rFP) # a1<- new save area
+ lw a1, offStackSaveArea_method(a1)
+ sw a1, offThread_method(rSELF)
+ lw a2, offMethod_clazz(a1)
+ lw a3, offMethod_insns(a1)
+ lw a2, offClassObject_pDvmDex(a2)
+ EAS1(rPC, a3, a0)
+ sw a2, offThread_methodClassDex(rSELF)
+
+ /* release the tracked alloc on the exception */
+ move a0, rOBJ
+ move a1, rSELF
+ JAL(dvmReleaseTrackedAlloc)
+
+ /* restore the exception if the handler wants it */
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ bne t0, OP_MOVE_EXCEPTION, 2f
+ sw rOBJ, offThread_exception(rSELF)
+2:
+ GOTO_OPCODE(t0)
+
+ # Manage debugger bookkeeping
+7:
+ sw rPC, offThread_pc(rSELF)
+ sw rFP, offThread_curFrame(rSELF)
+ move a0, rSELF
+ move a1, rOBJ
+ JAL(dvmReportExceptionThrow)
+ b 8b
+
+.LnotCaughtLocally: # rOBJ = exception
+ /* fix stack overflow if necessary */
+ lbu a1, offThread_stackOverflowed(rSELF)
+ beqz a1, 3f
+ move a0, rSELF
+ move a1, rOBJ
+ JAL(dvmCleanupStackOverflow) # dvmCleanupStackOverflow(self, exception)
+
+3:
+ # may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+ /* call __android_log_print(prio, tag, format, ...) */
+ /* "Exception %s from %s:%d not caught locally" */
+ lw a0, offThread_method(rSELF)
+ lw a1, offMethod_insns(a0)
+ subu a1, rPC, a1
+ sra a1, a1, 1
+ JAL(dvmLineNumFromPC)
+ sw v0, 20(sp)
+ # dvmGetMethodSourceFile(method)
+ lw a0, offThread_method(rSELF)
+ JAL(dvmGetMethodSourceFile)
+ sw v0, 16(sp)
+ # exception->clazz->descriptor
+ lw a3, offObject_clazz(rOBJ)
+ lw a3, offClassObject_descriptor(a3)
+ la a2, .LstrExceptionNotCaughtLocally
+ la a1, .LstrLogTag
+ li a0, 3
+ JAL(__android_log_print)
+#endif
+ sw rOBJ, offThread_exception(rSELF)
+ move a0, rOBJ
+ move a1, rSELF
+ JAL(dvmReleaseTrackedAlloc)
+ b common_gotoBail
+
+ /*
+ * Exception handling, calls through "glue code".
+ */
+ .if 0
+.LexceptionOld:
+ SAVE_PC_TO_SELF() # export state
+ SAVE_FP_TO_SELF()
+ move a0, rSELF # arg to function
+ JAL(dvmMterp_exceptionThrown)
+ b common_resumeAfterGlueCall
+ .endif
+
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including the current
+ * instruction.
+ *
+ * On entry:
+ * rBIX: &dvmDex->pResFields[field]
+ * a0: field pointer (must preserve)
+ */
+common_verifyField:
+ lhu a3, offThread_subMode(rSELF)
+ andi a3, kSubModeJitTraceBuild
+ bnez a3, 1f # trace-build active: go check the field's resolution state
+ jr ra # not building a trace: nothing to do
+1:
+ lw a1, (rBIX) # a1 <- dvmDex->pResFields[field] entry
+ beqz a1, 2f # entry still null: field not yet resolved, must end trace
+ jr ra # already resolved: safe to include this instruction
+2:
+ SCRATCH_STORE(a0, 0)
+ SCRATCH_STORE(a1, 4)
+ SCRATCH_STORE(a2, 8)
+ SCRATCH_STORE(a3, 12)
+ SCRATCH_STORE(ra, 16)
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) # (self, pc) end trace before this unresolved-field instruction
+ SCRATCH_LOAD(a0, 0)
+ SCRATCH_LOAD(a1, 4)
+ SCRATCH_LOAD(a2, 8)
+ SCRATCH_LOAD(a3, 12)
+ SCRATCH_LOAD(ra, 16)
+ jr ra # return
+#endif
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+ LOAD_PC_FP_FROM_SELF() # pull rPC and rFP out of thread
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh
+ FETCH_INST() # load rINST from rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * Invalid array index. Note that our calling convention is strange; we use a1
+ * and a3 because those just happen to be the registers all our callers are
+ * using. We move a3 before calling the C function, but a1 happens to match.
+ * a1: index
+ * a3: size
+ */
+common_errArrayIndex:
+ EXPORT_PC()
+ move a0, a3
+ JAL(dvmThrowArrayIndexOutOfBoundsException)
+ b common_exceptionThrown
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+ la a0, .LstrDivideByZero
+ JAL(dvmThrowArithmeticException)
+ b common_exceptionThrown
+
+/*
+ * Attempt to allocate an array with a negative size.
+ * On entry: length in a1
+ */
+common_errNegativeArraySize:
+ EXPORT_PC()
+ move a0, a1 # arg0 <- len
+ JAL(dvmThrowNegativeArraySizeException) # (len)
+ b common_exceptionThrown
+
+/*
+ * Invocation of a non-existent method.
+ * On entry: method name in a1
+ */
+common_errNoSuchMethod:
+ EXPORT_PC()
+ move a0, a1
+ JAL(dvmThrowNoSuchMethodError)
+ b common_exceptionThrown
+
+/*
+ * We encountered a null object when we weren't expecting one. We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+ EXPORT_PC()
+ li a0, 0
+ JAL(dvmThrowNullPointerException)
+ b common_exceptionThrown
+
+/*
+ * For debugging, cause an immediate fault. The source address will be in ra. Use a jal to jump here.
+ */
+common_abort:
+ lw zero,-4(zero) # generate SIGSEGV
+
+/*
+ * Spit out a "we were here", preserving all registers.
+ */
+ .macro SQUEAK num
+common_squeak\num:
+ STACK_STORE_RA();
+ la a0, .LstrSqueak
+ LOAD_IMM(a1, \num);
+ JAL(printf);
+ STACK_LOAD_RA();
+ RETURN;
+ .endm
+
+ SQUEAK 0
+ SQUEAK 1
+ SQUEAK 2
+ SQUEAK 3
+ SQUEAK 4
+ SQUEAK 5
+
+/*
+ * Spit out the number in a0, preserving registers.
+ */
+common_printNum:
+ STACK_STORE_RA()
+ MOVE_REG(a1, a0)
+ la a0, .LstrSqueak
+ JAL(printf)
+ STACK_LOAD_RA()
+ RETURN
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+ STACK_STORE_RA()
+ la a0, .LstrNewline
+ JAL(printf)
+ STACK_LOAD_RA()
+ RETURN
+
+ /*
+ * Print the 32-bit quantity in a0 as a hex value, preserving registers.
+ */
+common_printHex:
+ STACK_STORE_RA()
+ MOVE_REG(a1, a0)
+ la a0, .LstrPrintHex
+ JAL(printf)
+ STACK_LOAD_RA()
+RETURN;
+
+/*
+ * Print the 64-bit quantity in a0-a1, preserving registers.
+ */
+common_printLong:
+ STACK_STORE_RA()
+ MOVE_REG(a3, a1)
+ MOVE_REG(a2, a0)
+ la a0, .LstrPrintLong
+ JAL(printf)
+ STACK_LOAD_RA()
+ RETURN;
+
+/*
+ * Print full method info. Pass the Method* in a0. Preserves regs.
+ */
+common_printMethod:
+ STACK_STORE_RA()
+ JAL(dvmMterpPrintMethod)
+ STACK_LOAD_RA()
+ RETURN
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info. Requires the C function to be compiled in.
+ */
+ .if 0
+common_dumpRegs:
+ STACK_STORE_RA()
+ JAL(dvmMterpDumpMipsRegs)
+ STACK_LOAD_RA()
+ RETURN
+ .endif
+
+/*
+ * Zero-terminated ASCII string data.
+ */
+ .data
+
+.LstrBadEntryPoint:
+ .asciiz "Bad entry point %d\n"
+.LstrDivideByZero:
+ .asciiz "divide by zero"
+.LstrFilledNewArrayNotImpl:
+ .asciiz "filled-new-array only implemented for 'int'"
+.LstrLogTag:
+ .asciiz "mterp"
+.LstrExceptionNotCaughtLocally:
+ .asciiz "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+ .asciiz "\n"
+.LstrSqueak:
+ .asciiz "<%d>"
+.LstrPrintHex:
+ .asciiz "<0x%x>"
+.LstrPrintLong:
+ .asciiz "<%lld>"
diff --git a/vm/mterp/mips/header.S b/vm/mterp/mips/header.S
new file mode 100644
index 000000000..0f03599f6
--- /dev/null
+++ b/vm/mterp/mips/header.S
@@ -0,0 +1,345 @@
+#include "../common/asm-constants.h"
+#include "../common/mips-defines.h"
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+
+#ifdef __mips_hard_float
+#define HARD_FLOAT
+#else
+#define SOFT_FLOAT
+#endif
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32R2
+#endif
+
+/* MIPS definitions and declarations
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rIBASE interpreted instruction base pointer, used for computed goto
+ s4 rINST first 16-bit code unit of current instruction
+*/
+
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rBIX s6
+#define rTEMP s7
+
+/* The long arguments sent to function calls in Big-endian mode should be register
+swapped when sent to functions in little endian mode. In other words long variable
+sent as a0(MSW), a1(LSW) for a function call in LE mode should be sent as a1, a0 in
+Big Endian mode */
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+#else
+#define rARG0 a1
+#define rARG1 a0
+#define rARG2 a3
+#define rARG3 a2
+#define rRESULT0 v1
+#define rRESULT1 v0
+#endif
+
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_SELF() lw rPC, offThread_pc(rSELF)
+#define SAVE_PC_TO_SELF() sw rPC, offThread_pc(rSELF)
+#define LOAD_FP_FROM_SELF() lw rFP, offThread_curFrame(rSELF)
+#define SAVE_FP_TO_SELF() sw rFP, offThread_curFrame(rSELF)
+#define LOAD_PC_FP_FROM_SELF() \
+ LOAD_PC_FROM_SELF(); \
+ LOAD_FP_FROM_SELF()
+#define SAVE_PC_FP_TO_SELF() \
+ SAVE_PC_TO_SELF(); \
+ SAVE_FP_TO_SELF()
+
+#define EXPORT_PC() \
+ sw rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+#define SAVEAREA_FROM_FP(rd, _fpreg) \
+ subu rd, _fpreg, sizeofStackSaveArea
+
+#define FETCH_INST() lhu rINST, (rPC)
+
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+ addu rPC, rPC, ((_count) * 2)
+
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+ lhu _dreg, ((_count)*2)(_sreg) ; \
+ addu _sreg, _sreg, (_count)*2
+
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+ lhu rINST, (rPC)
+
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+
+#else
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2)(rPC)
+
+#endif
+
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+
+#define GET_PREFETCHED_OPCODE(dreg, sreg) andi dreg, sreg, 255
+
+#define GOTO_OPCODE(rd) sll rd, rd, ${handler_size_bits}; \
+ addu rd, rIBASE, rd; \
+ jr rd
+
+#define GOTO_OPCODE_BASE(_base, rd) sll rd, rd, ${handler_size_bits}; \
+ addu rd, _base, rd; \
+ jr rd
+
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+ .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) STORE_eas2(rd, rFP, rix)
+
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+ sll dst, dst, ${handler_size_bits}; \
+ addu dst, rIBASE, dst; \
+ sll t8, rix, 2; \
+ addu t8, t8, rFP; \
+ jr dst; \
+ sw rd, 0(t8); \
+ .set reorder
+
+#define SET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+ .set noat; s.s rd, (AT); .set at
+
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifndef MIPS32R2
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#else
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+#define LOAD_rSELF_OFF(rd, off) lw rd, offThread_##off## (rSELF)
+
+#define LOAD_rSELF_method(rd) LOAD_rSELF_OFF(rd, method)
+#define LOAD_rSELF_methodClassDex(rd) LOAD_rSELF_OFF(rd, methodClassDex)
+#define LOAD_rSELF_interpStackEnd(rd) LOAD_rSELF_OFF(rd, interpStackEnd)
+#define LOAD_rSELF_retval(rd) LOAD_rSELF_OFF(rd, retval)
+#define LOAD_rSELF_pActiveProfilers(rd) LOAD_rSELF_OFF(rd, pActiveProfilers)
+#define LOAD_rSELF_bailPtr(rd) LOAD_rSELF_OFF(rd, bailPtr)
+#define LOAD_rSELF_SelfSuspendCount(rd) LOAD_rSELF_OFF(rd, SelfSuspendCount)
+
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+ sll AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+ srl AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define LOADu2_RB_OFF(rd, rbase, off) lhu rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+ sw rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+ lw rhi, (off+4)(rbase)
+
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+ sw rhi, (off+4)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+ lw rhi, (off+4)(rbase)
+
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+ s.s rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+ l.s rhi, (off+4)(rbase)
+#else
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+ sw rhi, (off)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+ lw rhi, (off)(rbase)
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+ sw rhi, (off)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+ lw rhi, (off)(rbase)
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, (off+4)(rbase); \
+ s.s rhi, (off)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, (off+4)(rbase); \
+ l.s rhi, (off)(rbase)
+#endif
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#define vSTORE64(rlo, rhi, rbase) vSTORE64_off(rlo, rhi, rbase, 0)
+#define vLOAD64(rlo, rhi, rbase) vLOAD64_off(rlo, rhi, rbase, 0)
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+#define STORE64_lo(rd, rbase) sw rd, 0(rbase)
+#define STORE64_hi(rd, rbase) sw rd, 4(rbase)
+
+
+#define LOAD_offThread_exception(rd, rbase) LOAD_RB_OFF(rd, rbase, offThread_exception)
+#define LOAD_base_offArrayObject_length(rd, rbase) LOAD_RB_OFF(rd, rbase, offArrayObject_length)
+#define LOAD_base_offClassObject_accessFlags(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_accessFlags)
+#define LOAD_base_offClassObject_descriptor(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_descriptor)
+#define LOAD_base_offClassObject_super(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_super)
+
+#define LOAD_base_offClassObject_vtable(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtable)
+#define LOAD_base_offClassObject_vtableCount(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtableCount)
+#define LOAD_base_offDvmDex_pResClasses(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResClasses)
+#define LOAD_base_offDvmDex_pResFields(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResFields)
+
+#define LOAD_base_offDvmDex_pResMethods(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResMethods)
+#define LOAD_base_offDvmDex_pResStrings(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResStrings)
+#define LOAD_base_offInstField_byteOffset(rd, rbase) LOAD_RB_OFF(rd, rbase, offInstField_byteOffset)
+#define LOAD_base_offStaticField_value(rd, rbase) LOAD_RB_OFF(rd, rbase, offStaticField_value)
+#define LOAD_base_offMethod_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_clazz)
+
+#define LOAD_base_offMethod_name(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_name)
+#define LOAD_base_offObject_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offObject_clazz)
+
+#define LOADu2_offMethod_methodIndex(rd, rbase) LOADu2_RB_OFF(rd, rbase, offMethod_methodIndex)
+
+
+#define STORE_offThread_exception(rd, rbase) STORE_RB_OFF(rd, rbase, offThread_exception)
+
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define SAVE_RA(offset) STACK_STORE(ra, offset)
+#define LOAD_RA(offset) STACK_LOAD(ra, offset)
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define RETURN jr ra
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_SCR 32
+#define STACK_OFFSET_SCRMX 80
+#define STACK_OFFSET_GP 84
+#define STACK_OFFSET_rFP 112
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+#define STACK_STORE_RA() CREATE_STACK(STACK_SIZE); \
+ STACK_STORE(gp, STACK_OFFSET_GP); \
+ STACK_STORE(ra, 124)
+
+#define STACK_STORE_S0() STACK_STORE_RA(); \
+ STACK_STORE(s0, 116)
+
+#define STACK_STORE_S0S1() STACK_STORE_S0(); \
+ STACK_STORE(s1, STACK_OFFSET_rFP)
+
+#define STACK_LOAD_RA() STACK_LOAD(ra, 124); \
+ STACK_LOAD(gp, STACK_OFFSET_GP); \
+ DELETE_STACK(STACK_SIZE)
+
+#define STACK_LOAD_S0() STACK_LOAD(s0, 116); \
+ STACK_LOAD_RA()
+
+#define STACK_LOAD_S0S1() STACK_LOAD(s1, STACK_OFFSET_rFP); \
+ STACK_LOAD_S0()
+
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+ STACK_STORE(ra, 124); \
+ STACK_STORE(fp, 120); \
+ STACK_STORE(s0, 116); \
+ STACK_STORE(s1, STACK_OFFSET_rFP); \
+ STACK_STORE(s2, 108); \
+ STACK_STORE(s3, 104); \
+ STACK_STORE(s4, 100); \
+ STACK_STORE(s5, 96); \
+ STACK_STORE(s6, 92); \
+ STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+ STACK_LOAD(s7, 88); \
+ STACK_LOAD(s6, 92); \
+ STACK_LOAD(s5, 96); \
+ STACK_LOAD(s4, 100); \
+ STACK_LOAD(s3, 104); \
+ STACK_LOAD(s2, 108); \
+ STACK_LOAD(s1, STACK_OFFSET_rFP); \
+ STACK_LOAD(s0, 116); \
+ STACK_LOAD(fp, 120); \
+ STACK_LOAD(ra, 124); \
+ DELETE_STACK(STACK_SIZE)
+
+/*
+ * first 8 words are reserved for function calls
+ * Maximum offset is STACK_OFFSET_SCRMX-STACK_OFFSET_SCR
+ */
+#define SCRATCH_STORE(r,off) \
+ STACK_STORE(r, STACK_OFFSET_SCR+off);
+#define SCRATCH_LOAD(r,off) \
+ STACK_LOAD(r, STACK_OFFSET_SCR+off);
+
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
diff --git a/vm/mterp/mips/platform.S b/vm/mterp/mips/platform.S
new file mode 100644
index 000000000..ec1e3eed9
--- /dev/null
+++ b/vm/mterp/mips/platform.S
@@ -0,0 +1,32 @@
+/*
+ * ===========================================================================
+ * CPU-version-specific defines
+ * ===========================================================================
+ */
+
+#if !defined(ANDROID_SMP)
+# error "Must define ANDROID_SMP"
+#endif
+
+/*
+ * Macro for data memory barrier.
+ */
+.macro SMP_DMB
+#if ANDROID_SMP != 0
+ sync
+#else
+ /* not SMP */
+#endif
+.endm
+
+/*
+ * Macro for data memory barrier (store/store variant).
+ */
+.macro SMP_DMB_ST
+#if ANDROID_SMP != 0
+ // FIXME: Is this really needed?
+ sync
+#else
+ /* not SMP */
+#endif
+.endm
diff --git a/vm/mterp/mips/stub.S b/vm/mterp/mips/stub.S
new file mode 100644
index 000000000..fad2238d0
--- /dev/null
+++ b/vm/mterp/mips/stub.S
@@ -0,0 +1,10 @@
+ /* (stub) */
+ SAVE_PC_TO_SELF() # only need to export PC and FP
+ SAVE_FP_TO_SELF()
+ move a0, rSELF # self is first arg to function
+ JAL(dvmMterp_${opcode}) # call
+ LOAD_PC_FROM_SELF() # retrieve updated values
+ LOAD_FP_FROM_SELF()
+ FETCH_INST() # load next instruction from rPC
+ GET_INST_OPCODE(t0) # ...trim down to just the opcode
+ GOTO_OPCODE(t0) # ...and jump to the handler
diff --git a/vm/mterp/mips/unflop.S b/vm/mterp/mips/unflop.S
new file mode 100644
index 000000000..9018bc933
--- /dev/null
+++ b/vm/mterp/mips/unflop.S
@@ -0,0 +1,32 @@
+%default {"preinstr":""}
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # t0 <- A+
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, a3) # a0 <- vB
+#else
+ GET_VREG_F(fa0, a3)
+#endif
+ $preinstr # optional op
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifdef SOFT_FLOAT
+ $instr # a0 <- op, a0-a3 changed
+
+.L${opcode}_set_vreg:
+ SET_VREG(v0, rOBJ) # vAA <- result0
+#else
+ $instr_f
+
+.L${opcode}_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ)
+#endif
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GOTO_OPCODE(t1) # jump to next instruction
+ /* 9-10 instructions */
diff --git a/vm/mterp/mips/unflopWide.S b/vm/mterp/mips/unflopWide.S
new file mode 100644
index 000000000..3411c2e64
--- /dev/null
+++ b/vm/mterp/mips/unflopWide.S
@@ -0,0 +1,32 @@
+%default {"preinstr":"", "ld_arg":"LOAD64_F(fa0, fa0f, a3)", "st_result":"STORE64_F(fv0, fv0f, rOBJ)"}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # t1 <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a3) # a0/a1 <- vAA
+#else
+ $ld_arg
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0/a1 <- op, a2-a3 changed
+
+.L${opcode}_set_vreg:
+#ifdef SOFT_FLOAT
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vAA <- a0/a1
+#else
+ $st_result # vAA <- a0/a1
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
diff --git a/vm/mterp/mips/unflopWider.S b/vm/mterp/mips/unflopWider.S
new file mode 100644
index 000000000..f6d57184b
--- /dev/null
+++ b/vm/mterp/mips/unflopWider.S
@@ -0,0 +1,33 @@
+%default {"preinstr":"", "st_result":"STORE64_F(fv0, fv0f, rOBJ)"}
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, a3) # a0 <- vB
+#else
+ GET_VREG_F(fa0, a3)
+#endif
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+#ifdef SOFT_FLOAT
+ $instr # result <- op, a0-a3 changed
+
+.L${opcode}_set_vreg:
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vA/vA+1 <- a0/a1
+#else
+ $instr_f
+
+.L${opcode}_set_vreg:
+ $st_result # vA/vA+1 <- a0/a1
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
diff --git a/vm/mterp/mips/unop.S b/vm/mterp/mips/unop.S
new file mode 100644
index 000000000..52a8f0ac9
--- /dev/null
+++ b/vm/mterp/mips/unop.S
@@ -0,0 +1,19 @@
+%default {"preinstr":"", "result0":"a0"}
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO($result0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
diff --git a/vm/mterp/mips/unopNarrower.S b/vm/mterp/mips/unopNarrower.S
new file mode 100644
index 000000000..85a94b71e
--- /dev/null
+++ b/vm/mterp/mips/unopNarrower.S
@@ -0,0 +1,37 @@
+%default {"preinstr":"", "load":"LOAD64_F(fa0, fa0f, a3)"}
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter, except for
+ * long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # t1 <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a3) # a0/a1 <- vB/vB+1
+#else
+ $load
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+#ifdef SOFT_FLOAT
+ $instr # a0 <- op, a0-a3 changed
+
+.L${opcode}_set_vreg:
+ SET_VREG(v0, rOBJ) # vA <- result0
+#else
+ $instr_f
+
+.L${opcode}_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
diff --git a/vm/mterp/mips/unopWide.S b/vm/mterp/mips/unopWide.S
new file mode 100644
index 000000000..00e4e17d1
--- /dev/null
+++ b/vm/mterp/mips/unopWide.S
@@ -0,0 +1,22 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(t1) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(rOBJ, rFP, t1) # rOBJ <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64($result0, $result1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
diff --git a/vm/mterp/mips/unopWider.S b/vm/mterp/mips/unopWider.S
new file mode 100644
index 000000000..f601c1196
--- /dev/null
+++ b/vm/mterp/mips/unopWider.S
@@ -0,0 +1,20 @@
+%default {"preinstr":"", "result0":"a0", "result1":"a1"}
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(t1) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, a3) # a0 <- vB
+ EAS2(rOBJ, rFP, t1) # rOBJ <- &fp[A]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ $preinstr # optional op
+ $instr # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64($result0, $result1, rOBJ) # vA/vA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
diff --git a/vm/mterp/mips/unused.S b/vm/mterp/mips/unused.S
new file mode 100644
index 000000000..d91dafb93
--- /dev/null
+++ b/vm/mterp/mips/unused.S
@@ -0,0 +1,2 @@
+ BAL(common_abort)
+
diff --git a/vm/mterp/mips/zcmp.S b/vm/mterp/mips/zcmp.S
new file mode 100644
index 000000000..aaac52dc1
--- /dev/null
+++ b/vm/mterp/mips/zcmp.S
@@ -0,0 +1,33 @@
+%verify "branch taken"
+%verify "branch not taken"
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(a1, 1) # a1 <- branch offset, in code units
+ b${revcmp} a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li a1, 2 # a1- BYTE branch dist for not-taken
+2:
+ addu a1, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a1, 3f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+3:
+ bnez a0, common_updateProfile # test for JIT off at target
+#else
+ bgez a1, 4f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rtable base
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
diff --git a/vm/mterp/out/InterpAsm-mips.S b/vm/mterp/out/InterpAsm-mips.S
new file mode 100644
index 000000000..dbb488b28
--- /dev/null
+++ b/vm/mterp/out/InterpAsm-mips.S
@@ -0,0 +1,29959 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: mips/header.S */
+#include "../common/asm-constants.h"
+#include "../common/mips-defines.h"
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+
+#ifdef __mips_hard_float
+#define HARD_FLOAT
+#else
+#define SOFT_FLOAT
+#endif
+
+#if (__mips==32) && (__mips_isa_rev>=2)
+#define MIPS32R2
+#endif
+
+/* MIPS definitions and declarations
+
+ reg nick purpose
+ s0 rPC interpreted program counter, used for fetching instructions
+ s1 rFP interpreted frame pointer, used for accessing locals and args
+ s2 rSELF self (Thread) pointer
+ s3 rIBASE interpreted instruction base pointer, used for computed goto
+ s4 rINST first 16-bit code unit of current instruction
+*/
+
+
+/* single-purpose registers, given names for clarity */
+#define rPC s0
+#define rFP s1
+#define rSELF s2
+#define rIBASE s3
+#define rINST s4
+#define rOBJ s5
+#define rBIX s6
+#define rTEMP s7
+
+/* The long arguments sent to function calls in Big-endian mode should be register
+swapped when sent to functions in little endian mode. In other words long variable
+sent as a0(MSW), a1(LSW) for a function call in LE mode should be sent as a1, a0 in
+Big Endian mode */
+
+#ifdef HAVE_LITTLE_ENDIAN
+#define rARG0 a0
+#define rARG1 a1
+#define rARG2 a2
+#define rARG3 a3
+#define rRESULT0 v0
+#define rRESULT1 v1
+#else
+#define rARG0 a1
+#define rARG1 a0
+#define rARG2 a3
+#define rARG3 a2
+#define rRESULT0 v1
+#define rRESULT1 v0
+#endif
+
+
+/* save/restore the PC and/or FP from the glue struct */
+#define LOAD_PC_FROM_SELF() lw rPC, offThread_pc(rSELF)
+#define SAVE_PC_TO_SELF() sw rPC, offThread_pc(rSELF)
+#define LOAD_FP_FROM_SELF() lw rFP, offThread_curFrame(rSELF)
+#define SAVE_FP_TO_SELF() sw rFP, offThread_curFrame(rSELF)
+#define LOAD_PC_FP_FROM_SELF() \
+ LOAD_PC_FROM_SELF(); \
+ LOAD_FP_FROM_SELF()
+#define SAVE_PC_FP_TO_SELF() \
+ SAVE_PC_TO_SELF(); \
+ SAVE_FP_TO_SELF()
+
+#define EXPORT_PC() \
+ sw rPC, (offStackSaveArea_currentPc - sizeofStackSaveArea)(rFP)
+
+#define SAVEAREA_FROM_FP(rd, _fpreg) \
+ subu rd, _fpreg, sizeofStackSaveArea
+
+#define FETCH_INST() lhu rINST, (rPC)
+
+#define FETCH_ADVANCE_INST(_count) lhu rINST, ((_count)*2)(rPC); \
+ addu rPC, rPC, ((_count) * 2)
+
+#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
+ lhu _dreg, ((_count)*2)(_sreg) ; \
+ addu _sreg, _sreg, (_count)*2
+
+#define FETCH_ADVANCE_INST_RB(rd) addu rPC, rPC, rd; \
+ lhu rINST, (rPC)
+
+#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
+#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+
+#else
+
+#define FETCH_B(rd, _count) lbu rd, ((_count) * 2 + 1)(rPC)
+#define FETCH_C(rd, _count) lbu rd, ((_count) * 2)(rPC)
+
+#endif
+
+#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
+
+/*
+ * Put the prefetched instruction's opcode field into the specified register.
+ */
+
+#define GET_PREFETCHED_OPCODE(dreg, sreg) andi dreg, sreg, 255
+
+#define GOTO_OPCODE(rd) sll rd, rd, 7; \
+ addu rd, rIBASE, rd; \
+ jr rd
+
+#define GOTO_OPCODE_BASE(_base, rd) sll rd, rd, 7; \
+ addu rd, _base, rd; \
+ jr rd
+
+#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
+
+#define GET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+ .set noat; l.s rd, (AT); .set at
+
+#define SET_VREG(rd, rix) STORE_eas2(rd, rFP, rix)
+
+#define SET_VREG_GOTO(rd, rix, dst) .set noreorder; \
+ sll dst, dst, 7; \
+ addu dst, rIBASE, dst; \
+ sll t8, rix, 2; \
+ addu t8, t8, rFP; \
+ jr dst; \
+ sw rd, 0(t8); \
+ .set reorder
+
+#define SET_VREG_F(rd, rix) EAS2(AT, rFP, rix); \
+ .set noat; s.s rd, (AT); .set at
+
+
+#define GET_OPA(rd) srl rd, rINST, 8
+#ifndef MIPS32R2
+#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
+#else
+#define GET_OPA4(rd) ext rd, rINST, 8, 4
+#endif
+#define GET_OPB(rd) srl rd, rINST, 12
+
+#define LOAD_rSELF_OFF(rd, off) lw rd, offThread_##off## (rSELF)
+
+#define LOAD_rSELF_method(rd) LOAD_rSELF_OFF(rd, method)
+#define LOAD_rSELF_methodClassDex(rd) LOAD_rSELF_OFF(rd, methodClassDex)
+#define LOAD_rSELF_interpStackEnd(rd) LOAD_rSELF_OFF(rd, interpStackEnd)
+#define LOAD_rSELF_retval(rd) LOAD_rSELF_OFF(rd, retval)
+#define LOAD_rSELF_pActiveProfilers(rd) LOAD_rSELF_OFF(rd, pActiveProfilers)
+#define LOAD_rSELF_bailPtr(rd) LOAD_rSELF_OFF(rd, bailPtr)
+#define LOAD_rSELF_SelfSuspendCount(rd) LOAD_rSELF_OFF(rd, SelfSuspendCount)
+
+
+/*
+ * Form an Effective Address rd = rbase + roff<<n;
+ * Uses reg AT
+ */
+#define EASN(rd, rbase, roff, rshift) .set noat; \
+ sll AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
+#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
+#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
+#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
+
+/*
+ * Form an Effective Shift Right rd = rbase + roff>>n;
+ * Uses reg AT
+ */
+#define ESRN(rd, rbase, roff, rshift) .set noat; \
+ srl AT, roff, rshift; \
+ addu rd, rbase, AT; \
+ .set at
+
+#define LOAD_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; lw rd, 0(AT); .set at
+
+#define STORE_eas2(rd, rbase, roff) EAS2(AT, rbase, roff); \
+ .set noat; sw rd, 0(AT); .set at
+
+#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
+#define LOADu2_RB_OFF(rd, rbase, off) lhu rd, off(rbase)
+#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
+
+#ifdef HAVE_LITTLE_ENDIAN
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+ sw rhi, (off+4)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+ lw rhi, (off+4)(rbase)
+
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, off(rbase); \
+ sw rhi, (off+4)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, off(rbase); \
+ lw rhi, (off+4)(rbase)
+
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, off(rbase); \
+ s.s rhi, (off+4)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, off(rbase); \
+ l.s rhi, (off+4)(rbase)
+#else
+
+#define STORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+ sw rhi, (off)(rbase)
+#define LOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+ lw rhi, (off)(rbase)
+#define vSTORE64_off(rlo, rhi, rbase, off) sw rlo, (off+4)(rbase); \
+ sw rhi, (off)(rbase)
+#define vLOAD64_off(rlo, rhi, rbase, off) lw rlo, (off+4)(rbase); \
+ lw rhi, (off)(rbase)
+#define STORE64_off_F(rlo, rhi, rbase, off) s.s rlo, (off+4)(rbase); \
+ s.s rhi, (off)(rbase)
+#define LOAD64_off_F(rlo, rhi, rbase, off) l.s rlo, (off+4)(rbase); \
+ l.s rhi, (off)(rbase)
+#endif
+
+#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
+#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
+
+#define vSTORE64(rlo, rhi, rbase) vSTORE64_off(rlo, rhi, rbase, 0)
+#define vLOAD64(rlo, rhi, rbase) vLOAD64_off(rlo, rhi, rbase, 0)
+
+#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
+#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
+
+#define STORE64_lo(rd, rbase) sw rd, 0(rbase)
+#define STORE64_hi(rd, rbase) sw rd, 4(rbase)
+
+
+#define LOAD_offThread_exception(rd, rbase) LOAD_RB_OFF(rd, rbase, offThread_exception)
+#define LOAD_base_offArrayObject_length(rd, rbase) LOAD_RB_OFF(rd, rbase, offArrayObject_length)
+#define LOAD_base_offClassObject_accessFlags(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_accessFlags)
+#define LOAD_base_offClassObject_descriptor(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_descriptor)
+#define LOAD_base_offClassObject_super(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_super)
+
+#define LOAD_base_offClassObject_vtable(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtable)
+#define LOAD_base_offClassObject_vtableCount(rd, rbase) LOAD_RB_OFF(rd, rbase, offClassObject_vtableCount)
+#define LOAD_base_offDvmDex_pResClasses(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResClasses)
+#define LOAD_base_offDvmDex_pResFields(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResFields)
+
+#define LOAD_base_offDvmDex_pResMethods(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResMethods)
+#define LOAD_base_offDvmDex_pResStrings(rd, rbase) LOAD_RB_OFF(rd, rbase, offDvmDex_pResStrings)
+#define LOAD_base_offInstField_byteOffset(rd, rbase) LOAD_RB_OFF(rd, rbase, offInstField_byteOffset)
+#define LOAD_base_offStaticField_value(rd, rbase) LOAD_RB_OFF(rd, rbase, offStaticField_value)
+#define LOAD_base_offMethod_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_clazz)
+
+#define LOAD_base_offMethod_name(rd, rbase) LOAD_RB_OFF(rd, rbase, offMethod_name)
+#define LOAD_base_offObject_clazz(rd, rbase) LOAD_RB_OFF(rd, rbase, offObject_clazz)
+
+#define LOADu2_offMethod_methodIndex(rd, rbase) LOADu2_RB_OFF(rd, rbase, offMethod_methodIndex)
+
+
+#define STORE_offThread_exception(rd, rbase) STORE_RB_OFF(rd, rbase, offThread_exception)
+
+
+#define STACK_STORE(rd, off) sw rd, off(sp)
+#define STACK_LOAD(rd, off) lw rd, off(sp)
+#define CREATE_STACK(n) subu sp, sp, n
+#define DELETE_STACK(n) addu sp, sp, n
+
+#define SAVE_RA(offset) STACK_STORE(ra, offset)
+#define LOAD_RA(offset) STACK_LOAD(ra, offset)
+
+#define LOAD_ADDR(dest, addr) la dest, addr
+#define LOAD_IMM(dest, imm) li dest, imm
+#define MOVE_REG(dest, src) move dest, src
+#define RETURN jr ra
+#define STACK_SIZE 128
+
+#define STACK_OFFSET_ARG04 16
+#define STACK_OFFSET_ARG05 20
+#define STACK_OFFSET_ARG06 24
+#define STACK_OFFSET_ARG07 28
+#define STACK_OFFSET_SCR 32
+#define STACK_OFFSET_SCRMX 80
+#define STACK_OFFSET_GP 84
+#define STACK_OFFSET_rFP 112
+
+#define JAL(n) jal n
+#define BAL(n) bal n
+
+#define STACK_STORE_RA() CREATE_STACK(STACK_SIZE); \
+ STACK_STORE(gp, STACK_OFFSET_GP); \
+ STACK_STORE(ra, 124)
+
+#define STACK_STORE_S0() STACK_STORE_RA(); \
+ STACK_STORE(s0, 116)
+
+#define STACK_STORE_S0S1() STACK_STORE_S0(); \
+ STACK_STORE(s1, STACK_OFFSET_rFP)
+
+#define STACK_LOAD_RA() STACK_LOAD(ra, 124); \
+ STACK_LOAD(gp, STACK_OFFSET_GP); \
+ DELETE_STACK(STACK_SIZE)
+
+#define STACK_LOAD_S0() STACK_LOAD(s0, 116); \
+ STACK_LOAD_RA()
+
+#define STACK_LOAD_S0S1() STACK_LOAD(s1, STACK_OFFSET_rFP); \
+ STACK_LOAD_S0()
+
+#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
+ STACK_STORE(ra, 124); \
+ STACK_STORE(fp, 120); \
+ STACK_STORE(s0, 116); \
+ STACK_STORE(s1, STACK_OFFSET_rFP); \
+ STACK_STORE(s2, 108); \
+ STACK_STORE(s3, 104); \
+ STACK_STORE(s4, 100); \
+ STACK_STORE(s5, 96); \
+ STACK_STORE(s6, 92); \
+ STACK_STORE(s7, 88);
+
+#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
+ STACK_LOAD(s7, 88); \
+ STACK_LOAD(s6, 92); \
+ STACK_LOAD(s5, 96); \
+ STACK_LOAD(s4, 100); \
+ STACK_LOAD(s3, 104); \
+ STACK_LOAD(s2, 108); \
+ STACK_LOAD(s1, STACK_OFFSET_rFP); \
+ STACK_LOAD(s0, 116); \
+ STACK_LOAD(fp, 120); \
+ STACK_LOAD(ra, 124); \
+ DELETE_STACK(STACK_SIZE)
+
+/*
+ * first 8 words are reserved for function calls
+ * Maximum offset is STACK_OFFSET_SCRMX-STACK_OFFSET_SCR
+ */
+#define SCRATCH_STORE(r,off) \
+ STACK_STORE(r, STACK_OFFSET_SCR+off);
+#define SCRATCH_LOAD(r,off) \
+ STACK_LOAD(r, STACK_OFFSET_SCR+off);
+
+#if defined(WITH_JIT)
+#include "../common/jit-config.h"
+#endif
+
+/* File: mips/platform.S */
+/*
+ * ===========================================================================
+ * CPU-version-specific defines
+ * ===========================================================================
+ */
+
+#if !defined(ANDROID_SMP)
+# error "Must define ANDROID_SMP"
+#endif
+
+/*
+ * Macro for data memory barrier.
+ */
+.macro SMP_DMB
+#if ANDROID_SMP != 0
+ sync
+#else
+ /* not SMP */
+#endif
+.endm
+
+/*
+ * Macro for data memory barrier (store/store variant).
+ */
+.macro SMP_DMB_ST
+#if ANDROID_SMP != 0
+ // FIXME: Is this really needed?
+ sync
+#else
+ /* not SMP */
+#endif
+.endm
+
+/* File: mips/entry.S */
+
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Interpreter entry point.
+ */
+
+#define ASSIST_DEBUGGER 1
+
+ .text
+ .align 2
+ .global dvmMterpStdRun
+ .ent dvmMterpStdRun
+ .frame sp, STACK_SIZE, ra
+/*
+ * On entry:
+ *   a0  Thread* self
+ *
+ * The return comes via a call to dvmMterpStdBail().
+ */
+
+dvmMterpStdRun:
+ .set noreorder
+ .cpload t9
+ .set reorder
+/* Save to the stack. Frame size = STACK_SIZE */
+ STACK_STORE_FULL()
+/* This directive will make sure all subsequent jal restore gp at a known offset */
+ .cprestore STACK_OFFSET_GP
+
+ addu fp, sp, STACK_SIZE # Move Frame Pointer to the base of frame
+ /* save stack pointer, add magic word for debuggerd */
+ sw sp, offThread_bailPtr(a0) # Save SP
+
+ /* set up "named" registers, figure out entry point */
+ move rSELF, a0 # set rSELF
+ LOAD_PC_FROM_SELF()
+ LOAD_FP_FROM_SELF()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+
+#if defined(WITH_JIT)
+.LentryInstr:
+ /* Entry is always a possible trace start */
+ lw a0, offThread_pJitProfTable(rSELF)
+ FETCH_INST() # load rINST from rPC
+ sw zero, offThread_inJitCodeCache(rSELF)
+#if !defined(WITH_SELF_VERIFICATION)
+ bnez a0, common_updateProfile # profiling is enabled
+#else
+ lw a2, offThread_shadowSpace(rSELF) # to find out the jit exit state
+ beqz a0, 1f # profiling is disabled
+ lw a3, offShadowSpace_jitExitState(a2) # jit exit state
+ li t0, kSVSTraceSelect
+ bne a3, t0, 2f
+ li a2, kJitTSelectRequestHot # ask for trace selection
+ b common_selectTrace # go build the trace
+2:
+ li a4, kSVSNoProfile
+ beq a3, a4, 1f # don't profile the next instruction?
+ b common_updateProfile # collect profiles
+#endif
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ /* start executing the instruction at rPC */
+ FETCH_INST() # load rINST from rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+.Lbad_arg:
+ la a0, .LstrBadEntryPoint
+ #a1 holds value of entryPoint
+ JAL(printf)
+ JAL(dvmAbort)
+
+ .end dvmMterpStdRun
+
+ .global dvmMterpStdBail
+ .ent dvmMterpStdBail
+
+/* Restore the stack pointer and all the registers stored at sp from the save
+ * point established on entry. Return to whoever called dvmMterpStdRun.
+ *
+ * On entry:
+ * a0 Thread* self
+ */
+dvmMterpStdBail:
+ lw sp, offThread_bailPtr(a0) # Restore sp
+ STACK_LOAD_FULL()
+ jr ra
+
+ .end dvmMterpStdBail
+
+
+ .global dvmAsmInstructionStart
+ .type dvmAsmInstructionStart, %function
+dvmAsmInstructionStart = .L_OP_NOP
+ .text
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NOP: /* 0x00 */
+/* File: mips/OP_NOP.S */
+ FETCH_ADVANCE_INST(1) # advance to next instr, load rINST
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0) # execute it
+
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ .type dalvik_inst, @function
+dalvik_inst:
+ .ent dalvik_inst
+ .end dalvik_inst
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE: /* 0x01 */
+/* File: mips/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_FROM16: /* 0x02 */
+/* File: mips/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_16: /* 0x03 */
+/* File: mips/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(a1, 2) # a1 <- BBBB
+ FETCH(a0, 1) # a0 <- AAAA
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2 and jump
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_WIDE: /* 0x04 */
+/* File: mips/OP_MOVE_WIDE.S */
+ /* move-wide vA, vB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ GET_OPA4(a2) # a2 <- A(+)
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(a2, rFP, a2) # a2 <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[B]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: mips/OP_MOVE_WIDE_FROM16.S */
+ /* move-wide/from16 vAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 1) # a3 <- BBBB
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[AA] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: mips/OP_MOVE_WIDE_16.S */
+ /* move-wide/16 vAAAA, vBBBB */
+ /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
+ FETCH(a3, 2) # a3 <- BBBB
+ FETCH(a2, 1) # a2 <- AAAA
+ EAS2(a3, rFP, a3) # a3 <- &fp[BBBB]
+ EAS2(a2, rFP, a2) # a2 <- &fp[AAAA]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[BBBB]
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[AAAA] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_OBJECT: /* 0x07 */
+/* File: mips/OP_MOVE_OBJECT.S */
+/* File: mips/OP_MOVE.S */
+ /* for move, move-object, long-to-int */
+ /* op vA, vB */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: mips/OP_MOVE_OBJECT_FROM16.S */
+/* File: mips/OP_MOVE_FROM16.S */
+ /* for: move/from16, move-object/from16 */
+ /* op vAA, vBBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ GET_OPA(a0) # a0 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[AA] <- a2
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: mips/OP_MOVE_OBJECT_16.S */
+/* File: mips/OP_MOVE_16.S */
+ /* for: move/16, move-object/16 */
+ /* op vAAAA, vBBBB */
+ FETCH(a1, 2) # a1 <- BBBB
+ FETCH(a0, 1) # a0 <- AAAA
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_VREG(a2, a1) # a2 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[AAAA] <- a2 and jump
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_RESULT: /* 0x0a */
+/* File: mips/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_rSELF_retval(a0) # a0 <- self->retval.i
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: mips/OP_MOVE_RESULT_WIDE.S */
+ /* move-result-wide vAA */
+ GET_OPA(a2) # a2 <- AA
+ addu a3, rSELF, offThread_retval # a3 <- &self->retval
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ LOAD64(a0, a1, a3) # a0/a1 <- retval.j
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a2) # fp[AA] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: mips/OP_MOVE_RESULT_OBJECT.S */
+/* File: mips/OP_MOVE_RESULT.S */
+ /* for: move-result, move-result-object */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_rSELF_retval(a0) # a0 <- self->retval.i
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a2, t0) # fp[AA] <- a0
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: mips/OP_MOVE_EXCEPTION.S */
+ /* move-exception vAA */
+ GET_OPA(a2) # a2 <- AA
+ LOAD_offThread_exception(a3, rSELF) # a3 <- dvmGetException bypass
+ li a1, 0 # a1 <- 0
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ SET_VREG(a3, a2) # fp[AA] <- exception obj
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE_offThread_exception(a1, rSELF) # dvmClearException bypass
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_RETURN_VOID: /* 0x0e */
+/* File: mips/OP_RETURN_VOID.S */
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_RETURN: /* 0x0f */
+/* File: mips/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA
+ sw a0, offThread_retval(rSELF) # retval.i <- vAA
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_RETURN_WIDE: /* 0x10 */
+/* File: mips/OP_RETURN_WIDE.S */
+ /*
+ * Return a 64-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ */
+ /* return-wide vAA */
+ GET_OPA(a2) # a2 <- AA
+ EAS2(a2, rFP, a2) # a2 <- &fp[AA]
+ addu a3, rSELF, offThread_retval # a3 <- &self->retval
+ LOAD64(a0, a1, a2) # a0/a1 <- vAA/vAA+1
+ STORE64(a0, a1, a3) # retval <- a0/a1
+ b common_returnFromMethod
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_RETURN_OBJECT: /* 0x11 */
+/* File: mips/OP_RETURN_OBJECT.S */
+/* File: mips/OP_RETURN.S */
+ /*
+ * Return a 32-bit value. Copies the return value into the "thread"
+ * structure, then jumps to the return handler.
+ *
+ * for: return, return-object
+ */
+ /* op vAA */
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a0, a2) # a0 <- vAA
+ sw a0, offThread_retval(rSELF) # retval.i <- vAA
+ b common_returnFromMethod
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_4: /* 0x12 */
+/* File: mips/OP_CONST_4.S */
+ # const/4 vA, /* +B */
+ sll a1, rINST, 16 # a1 <- Bxxx0000
+ GET_OPA(a0) # a0 <- A+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sra a1, a1, 28 # a1 <- sssssssB (sign-extended)
+ and a0, a0, 15
+ GET_INST_OPCODE(t0) # ip <- opcode from rINST
+ SET_VREG_GOTO(a1, a0, t0) # fp[A] <- a1
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_16: /* 0x13 */
+/* File: mips/OP_CONST_16.S */
+ # const/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST: /* 0x14 */
+/* File: mips/OP_CONST.S */
+ # const vAA, /* +BBBBbbbb */
+ GET_OPA(a3) # a3 <- AA
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a1, a1, 16
+ or a0, a1, a0 # a0 <- BBBBbbbb
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_HIGH16: /* 0x15 */
+/* File: mips/OP_CONST_HIGH16.S */
+ # const/high16 vAA, /* +BBBB0000 */
+ FETCH(a0, 1) # a0 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ sll a0, a0, 16 # a0 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, a3, t0) # vAA <- a0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_WIDE_16: /* 0x16 */
+/* File: mips/OP_CONST_WIDE_16.S */
+ # const-wide/16 vAA, /* +BBBB */
+ FETCH_S(a0, 1) # a0 <- ssssBBBB (sign-extended)
+ GET_OPA(a3) # a3 <- AA
+ sra a1, a0, 31 # a1 <- ssssssss
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a3) # a3 <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_WIDE_32: /* 0x17 */
+/* File: mips/OP_CONST_WIDE_32.S */
+ # const-wide/32 vAA, /* +BBBBbbbb */
+ FETCH(a0, 1) # a0 <- 0000bbbb (low)
+ GET_OPA(a3) # a3 <- AA
+ FETCH_S(a2, 2) # a2 <- ssssBBBB (high)
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ sll a2, a2, 16
+ or a0, a0, a2 # a0 <- BBBBbbbb
+ EAS2(a3, rFP, a3) # a3 <- &fp[AA]
+ sra a1, a0, 31 # a1 <- ssssssss
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_WIDE: /* 0x18 */
+/* File: mips/OP_CONST_WIDE.S */
+ # const-wide vAA, /* +HHHHhhhhBBBBbbbb */
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (low middle)
+ FETCH(a2, 3) # a2 <- hhhh (high middle)
+ sll a1, 16 #
+ or a0, a1 # a0 <- BBBBbbbb (low word)
+ FETCH(a3, 4) # a3 <- HHHH (high)
+ GET_OPA(t1) # t1 <- AA
+ sll a3, 16
+ or a1, a3, a2 # a1 <- HHHHhhhh (high word)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ EAS2(t1, rFP, t1) # t1 <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, t1) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: mips/OP_CONST_WIDE_HIGH16.S */
+ # const-wide/high16 vAA, /* +BBBB000000000000 */
+ FETCH(a1, 1) # a1 <- 0000BBBB (zero-extended)
+ GET_OPA(a3) # a3 <- AA
+ li a0, 0 # a0 <- 00000000
+ sll a1, 16 # a1 <- BBBB0000
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a3) # a3 <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_STRING: /* 0x1a */
+/* File: mips/OP_CONST_STRING.S */
+ # const/string vAA, String /* BBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ GET_OPA(rOBJ) # rOBJ <- AA
+ LOAD_base_offDvmDex_pResStrings(a2, a2) # a2 <- dvmDex->pResStrings
+ LOAD_eas2(v0, a2, a1) # v0 <- pResStrings[BBBB]
+ # already resolved? branch if so
+ bnez v0, .LOP_CONST_STRING_resolve
+ /*
+ * Continuation if the String has not yet been resolved.
+ * a1: BBBB (String ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveString) # v0 <- String reference
+ # failed?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.LOP_CONST_STRING_resolve:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
+
+
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: mips/OP_CONST_STRING_JUMBO.S */
+ # const/string vAA, String /* BBBBBBBB */
+ FETCH(a0, 1) # a0 <- bbbb (low)
+ FETCH(a1, 2) # a1 <- BBBB (high)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ GET_OPA(rOBJ) # rOBJ <- AA
+ LOAD_base_offDvmDex_pResStrings(a2, a2) # a2 <- dvmDex->pResStrings
+ sll a1, a1, 16
+ or a1, a1, a0 # a1 <- BBBBbbbb
+ LOAD_eas2(v0, a2, a1) # v0 <- pResStrings[BBBBbbbb]
+ bnez v0, .LOP_CONST_STRING_JUMBO_resolve
+
+ /*
+ * Continuation if the String has not yet been resolved.
+ * a1: BBBBBBBB (String ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveString) # v0 <- String reference
+ # failed?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.LOP_CONST_STRING_JUMBO_resolve:
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t1) # vAA <- v0
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_CLASS: /* 0x1c */
+/* File: mips/OP_CONST_CLASS.S */
+ # const/class vAA, Class /* BBBB */
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ GET_OPA(rOBJ) # rOBJ <- AA
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- dvmDex->pResClasses
+ LOAD_eas2(v0, a2, a1) # v0 <- pResClasses[BBBB]
+
+ bnez v0, .LOP_CONST_CLASS_resolve # v0!=0 => resolved-ok
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * a1: BBBB (Class ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ li a2, 1 # a2 <- true
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- Class reference
+ # failed==0?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.LOP_CONST_CLASS_resolve:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vAA <- v0
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MONITOR_ENTER: /* 0x1d */
+/* File: mips/OP_MONITOR_ENTER.S */
+ /*
+ * Synchronize on an object.
+ */
+ /* monitor-enter vAA */
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a1, a2) # a1 <- vAA (object)
+ move a0, rSELF # a0 <- self
+ EXPORT_PC() # export PC so we can grab stack trace
+ # null object?
+ beqz a1, common_errNullObject # null object, throw an exception
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ JAL(dvmLockObject) # call(self, obj)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MONITOR_EXIT: /* 0x1e */
+/* File: mips/OP_MONITOR_EXIT.S */
+ /*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction. See the Dalvik
+ * instruction spec.
+ */
+ /* monitor-exit vAA */
+ GET_OPA(a2) # a2 <- AA
+ EXPORT_PC() # before fetch: export the PC
+ GET_VREG(a1, a2) # a1 <- vAA (object)
+ # null object?
+ beqz a1, 1f
+ move a0, rSELF # a0 <- self
+ JAL(dvmUnlockObject) # v0 <- success for unlock(self, obj)
+ # failed?
+ FETCH_ADVANCE_INST(1) # before throw: advance rPC, load rINST
+ beqz v0, common_exceptionThrown # yes, exception is pending
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+1:
+ FETCH_ADVANCE_INST(1) # before throw: advance rPC, load rINST
+ b common_errNullObject
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CHECK_CAST: /* 0x1f */
+/* File: mips/OP_CHECK_CAST.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+ # check-cast vAA, class /* BBBB */
+ GET_OPA(a3) # a3 <- AA
+ FETCH(a2, 1) # a2 <- BBBB
+ GET_VREG(rOBJ, a3) # rOBJ <- object
+ LOAD_rSELF_methodClassDex(a0) # a0 <- pDvmDex
+ LOAD_base_offDvmDex_pResClasses(a0, a0) # a0 <- pDvmDex->pResClasses
+ # is object null?
+ beqz rOBJ, .LOP_CHECK_CAST_okay # null obj, cast always succeeds
+ LOAD_eas2(a1, a0, a2) # a1 <- resolved class
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- obj->clazz
+ # have we resolved this before?
+ beqz a1, .LOP_CHECK_CAST_resolve # not resolved, do it now
+.LOP_CHECK_CAST_resolved:
+ # same class (trivial success)?
+ bne a0, a1, .LOP_CHECK_CAST_fullcheck # no, do full check
+.LOP_CHECK_CAST_okay:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from BBBB
+ * rOBJ holds object
+ */
+.LOP_CHECK_CAST_fullcheck:
+ move rBIX,a1 # avoid ClassObject getting clobbered
+ JAL(dvmInstanceofNonTrivial) # v0 <- boolean result
+ # failed?
+ bnez v0, .LOP_CHECK_CAST_okay # no, success
+ b .LOP_CHECK_CAST_castfailure
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INSTANCE_OF: /* 0x20 */
+/* File: mips/OP_INSTANCE_OF.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ */
+ # instance-of vA, vB, class /* CCCC */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_VREG(a0, a3) # a0 <- vB (object)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- pDvmDex
+ # is object null?
+ beqz a0, .LOP_INSTANCE_OF_store # null obj, not an instance, store a0
+ FETCH(a3, 1) # a3 <- CCCC
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- pDvmDex->pResClasses
+ LOAD_eas2(a1, a2, a3) # a1 <- resolved class
+ LOAD_base_offObject_clazz(a0, a0) # a0 <- obj->clazz
+ # have we resolved this before?
+ beqz a1, .LOP_INSTANCE_OF_resolve # not resolved, do it now
+.LOP_INSTANCE_OF_resolved: # a0=obj->clazz, a1=resolved class
+ # same class (trivial success)?
+ beq a0, a1, .LOP_INSTANCE_OF_trivial # yes, trivial finish
+ b .LOP_INSTANCE_OF_fullcheck # no, do full check
+
+ /*
+ * Trivial test succeeded, save and bail.
+ * rOBJ holds A
+ */
+.LOP_INSTANCE_OF_trivial:
+ li a0, 1 # indicate success
+ # fall thru
+ /*
+ * a0 holds boolean result
+ * rOBJ holds A
+ */
+.LOP_INSTANCE_OF_store:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ SET_VREG(a0, rOBJ) # vA <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: mips/OP_ARRAY_LENGTH.S */
+ /*
+ * Return the length of an array.
+ */
+ GET_OPB(a1) # a1 <- B
+ GET_OPA4(a2) # a2 <- A+
+ GET_VREG(a0, a1) # a0 <- vB (object ref)
+ # is object null?
+ beqz a0, common_errNullObject # yup, fail
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- array length
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a3, a2, t0) # vA <- length
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEW_INSTANCE: /* 0x22 */
+/* File: mips/OP_NEW_INSTANCE.S */
+ /*
+ * Create a new instance of a class.
+ */
+ # new-instance vAA, class /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX <- &resolved_class
+#endif
+ EXPORT_PC() # req'd for init, resolve, alloc
+ # already resolved?
+ beqz a0, .LOP_NEW_INSTANCE_resolve # no, resolve it now
+.LOP_NEW_INSTANCE_resolved: # a0=class
+ lbu a1, offClassObject_status(a0) # a1 <- ClassStatus enum
+ # has class been initialized?
+ li t0, CLASS_INITIALIZED
+ move rOBJ, a0 # save a0
+ bne a1, t0, .LOP_NEW_INSTANCE_needinit # no, init class now
+
+.LOP_NEW_INSTANCE_initialized: # a0=class
+ LOAD_base_offClassObject_accessFlags(a3, a0) # a3 <- clazz->accessFlags
+ li a1, ALLOC_DONT_TRACK # flags for alloc call
+ # a0=class
+ JAL(dvmAllocObject) # v0 <- new object
+ GET_OPA(a3) # a3 <- AA
+#if defined(WITH_JIT)
+ /*
+ * The JIT needs the class to be fully resolved before it can
+ * include this instruction in a trace.
+ */
+ lhu a1, offThread_subMode(rSELF)
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ and a1, kSubModeJitTraceBuild # under construction?
+ bnez a1, .LOP_NEW_INSTANCE_jitCheck
+#else
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+#endif
+ b .LOP_NEW_INSTANCE_continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEW_ARRAY: /* 0x23 */
+/* File: mips/OP_NEW_ARRAY.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array vA, vB, class@CCCC */
+ GET_OPB(a0) # a0 <- B
+ FETCH(a2, 1) # a2 <- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ GET_VREG(a1, a0) # a1 <- vB (array length)
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ LOAD_eas2(a0, a3, a2) # a0 <- resolved class
+ # check length
+ bltz a1, common_errNegativeArraySize # negative length, bail - len in a1
+ EXPORT_PC() # req'd for resolve, alloc
+ # already resolved?
+ beqz a0, .LOP_NEW_ARRAY_resolve
+
+ /*
+ * Finish allocation.
+ *
+ * a0 holds class
+ * a1 holds array length
+ */
+.LOP_NEW_ARRAY_finish:
+ li a2, ALLOC_DONT_TRACK # don't track in local refs table
+ JAL(dvmAllocArrayByClass) # v0 <- call(clazz, length, flags)
+ GET_OPA4(a2) # a2 <- A+
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(v0, a2) # vA <- v0
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: mips/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ EXPORT_PC() # need for resolve and alloc
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+ GET_OPA(rOBJ) # rOBJ <- AA or BA
+ # already resolved?
+ bnez a0, .LOP_FILLED_NEW_ARRAY_continue # yes, continue on
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_continue
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: mips/OP_FILLED_NEW_ARRAY_RANGE.S */
+/* File: mips/OP_FILLED_NEW_ARRAY.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, type /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ EXPORT_PC() # need for resolve and alloc
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+ GET_OPA(rOBJ) # rOBJ <- AA or BA
+ # already resolved?
+ bnez a0, .LOP_FILLED_NEW_ARRAY_RANGE_continue # yes, continue on
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: mips/OP_FILL_ARRAY_DATA.S */
+ /* fill-array-data vAA, +BBBBBBBB */
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll a1, a1, 16 # a1 <- BBBBbbbb
+ or a1, a0, a1 # a1 <- BBBBbbbb
+ GET_VREG(a0, a3) # a0 <- vAA (array object)
+ EAS1(a1, rPC, a1) # a1 <- PC + BBBBbbbb*2 (array data off.)
+ EXPORT_PC()
+ JAL(dvmInterpHandleFillArrayData) # fill the array with predefined data
+ # 0 means an exception is thrown
+ beqz v0, common_exceptionThrown # has exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_THROW: /* 0x27 */
+/* File: mips/OP_THROW.S */
+ /*
+ * Throw an exception object in the current thread.
+ */
+ /* throw vAA */
+ GET_OPA(a2) # a2 <- AA
+ GET_VREG(a1, a2) # a1 <- vAA (exception object)
+ EXPORT_PC() # exception handler can throw
+ # null object?
+ beqz a1, common_errNullObject # yes, throw an NPE instead
+ # bypass dvmSetException, just store it
+ STORE_offThread_exception(a1, rSELF) # thread->exception <- obj
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_GOTO: /* 0x28 */
+/* File: mips/OP_GOTO.S */
+ /*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto +AA */
+ sll a0, rINST, 16 # a0 <- AAxx0000
+ sra a1, a0, 24 # a1 <- ssssssAA (sign-extended)
+ addu a2, a1, a1 # a2 <- byte offset
+ /* If backwards branch refresh rIBASE (handler-table base, for suspend checks) */
+ bgez a1, 1f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bltz a1, common_testUpdateProfile # (a0) check for trace hotness
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_GOTO_16: /* 0x29 */
+/* File: mips/OP_GOTO_16.S */
+ /*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+ /* goto/16 +AAAA */
+ FETCH_S(a0, 1) # a0 <- ssssAAAA (sign-extended)
+ addu a1, a0, a0 # a1 <- byte offset (sign tested below; MIPS has no flags)
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgez a1, 1f
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bltz a1, common_testUpdateProfile # (a0) hot trace head?
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_GOTO_32: /* 0x2a */
+/* File: mips/OP_GOTO_32.S */
+ /*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Unlike most opcodes, this one is allowed to branch to itself, so
+ * our "backward branch" test must be "<=0" instead of "<0".
+ */
+ /* goto/32 +AAAAAAAA */
+ FETCH(a0, 1) # a0 <- aaaa (lo)
+ FETCH(a1, 2) # a1 <- AAAA (hi)
+ sll a1, a1, 16 # a1 <- AAAA0000
+ or a0, a0, a1 # a0 <- AAAAaaaa
+ addu a1, a0, a0 # a1 <- byte offset
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgtz a1, 1f # strictly forward branch? skip refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ blez a1, common_testUpdateProfile # (a0) hot trace head?
+#else
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+ bgtz a0, 2f # strictly forward branch? skip refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+2:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_PACKED_SWITCH: /* 0x2b */
+/* File: mips/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * When the JIT is present, all targets are treated as potential
+ * trace heads regardless of branch direction.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16 # t0 <- BBBB0000
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL(dvmInterpHandlePackedSwitch) # v0 <- code-unit branch offset
+ addu a1, v0, v0 # a1 <- byte offset
+ bgtz a1, 1f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bnez a0, common_updateProfile
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: mips/OP_SPARSE_SWITCH.S */
+/* File: mips/OP_PACKED_SWITCH.S */
+ /*
+ * Handle a packed-switch or sparse-switch instruction. In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * When the JIT is present, all targets are treated as potential
+ * trace heads regardless of branch direction.
+ *
+ * for: packed-switch, sparse-switch
+ */
+ /* op vAA, +BBBB */
+ FETCH(a0, 1) # a0 <- bbbb (lo)
+ FETCH(a1, 2) # a1 <- BBBB (hi)
+ GET_OPA(a3) # a3 <- AA
+ sll t0, a1, 16 # t0 <- BBBB0000
+ or a0, a0, t0 # a0 <- BBBBbbbb
+ GET_VREG(a1, a3) # a1 <- vAA
+ EAS1(a0, rPC, a0) # a0 <- PC + BBBBbbbb*2
+ JAL(dvmInterpHandleSparseSwitch) # v0 <- code-unit branch offset
+ addu a1, v0, v0 # a1 <- byte offset
+ bgtz a1, 1f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh handler base
+1:
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bnez a0, common_updateProfile
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CMPL_FLOAT: /* 0x2d */
+/* File: mips/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+
+ /* "classic" form */
+ FETCH(a0, 1) # a0 <- CCBB
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+#ifdef SOFT_FLOAT
+ GET_VREG(rOBJ, a2) # rOBJ <- vBB
+ GET_VREG(rBIX, a3) # rBIX <- vCC
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ JAL(__eqsf2) # v0 <- 0 iff (vBB == vCC), per o32 ABI
+ li rTEMP, 0 # set rTEMP to 0
+ beqz v0, OP_CMPL_FLOAT_finish
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ JAL(__ltsf2) # v0 < 0 iff (vBB < vCC)
+ li rTEMP, -1
+ bltz v0, OP_CMPL_FLOAT_finish
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ b OP_CMPL_FLOAT_continue
+#else
+ GET_VREG_F(fs0, a2)
+ GET_VREG_F(fs1, a3)
+ c.olt.s fcc0, fs0, fs1 # Is fs0 < fs1 (ordered: false on NaN)
+ li rTEMP, -1
+ bc1t fcc0, OP_CMPL_FLOAT_finish
+ c.olt.s fcc0, fs1, fs0
+ li rTEMP, 1
+ bc1t fcc0, OP_CMPL_FLOAT_finish
+ c.eq.s fcc0, fs0, fs1
+ li rTEMP, 0
+ bc1t fcc0, OP_CMPL_FLOAT_finish
+ b OP_CMPL_FLOAT_nan # all ordered compares false => NaN
+
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CMPG_FLOAT: /* 0x2e */
+/* File: mips/OP_CMPG_FLOAT.S */
+/* File: mips/OP_CMPL_FLOAT.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * The operation we're implementing is:
+ * if (x == y)
+ * return 0;
+ * else if (x < y)
+ * return -1;
+ * else if (x > y)
+ * return 1;
+ * else
+ * return {-1,1}; // one or both operands was NaN
+ *
+ * for: cmpl-float, cmpg-float
+ */
+ /* op vAA, vBB, vCC */
+
+ /* "classic" form */
+ FETCH(a0, 1) # a0 <- CCBB
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+#ifdef SOFT_FLOAT
+ GET_VREG(rOBJ, a2) # rOBJ <- vBB
+ GET_VREG(rBIX, a3) # rBIX <- vCC
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ JAL(__eqsf2) # v0 <- 0 iff (vBB == vCC), per o32 ABI
+ li rTEMP, 0 # set rTEMP to 0
+ beqz v0, OP_CMPG_FLOAT_finish
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ JAL(__ltsf2) # v0 < 0 iff (vBB < vCC)
+ li rTEMP, -1
+ bltz v0, OP_CMPG_FLOAT_finish
+ move a0, rOBJ # a0 <- vBB
+ move a1, rBIX # a1 <- vCC
+ b OP_CMPG_FLOAT_continue
+#else
+ GET_VREG_F(fs0, a2)
+ GET_VREG_F(fs1, a3)
+ c.olt.s fcc0, fs0, fs1 # Is fs0 < fs1 (ordered: false on NaN)
+ li rTEMP, -1
+ bc1t fcc0, OP_CMPG_FLOAT_finish
+ c.olt.s fcc0, fs1, fs0
+ li rTEMP, 1
+ bc1t fcc0, OP_CMPG_FLOAT_finish
+ c.eq.s fcc0, fs0, fs1
+ li rTEMP, 0
+ bc1t fcc0, OP_CMPG_FLOAT_finish
+ b OP_CMPG_FLOAT_nan # all ordered compares false => NaN
+
+#endif
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: mips/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and rOBJ, a0, 255 # rOBJ <- BB
+ srl rBIX, a0, 8 # rBIX <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
+ EAS2(rBIX, rFP, rBIX) # rBIX <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, rBIX) # a2/a3 <- vCC/vCC+1
+ JAL(__eqdf2) # v0 <- 0 iff (vBB == vCC), per o32 ABI
+ li rTEMP, 0
+ beqz v0, OP_CMPL_DOUBLE_finish
+
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, rBIX) # a2/a3 <- vCC/vCC+1
+ JAL(__ltdf2) # v0 < 0 iff (vBB < vCC)
+ li rTEMP, -1
+ bltz v0, OP_CMPL_DOUBLE_finish
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ b OP_CMPL_DOUBLE_continue
+#else
+ LOAD64_F(fs0, fs0f, rOBJ)
+ LOAD64_F(fs1, fs1f, rBIX)
+ c.olt.d fcc0, fs0, fs1 # ordered compare: false on NaN
+ li rTEMP, -1
+ bc1t fcc0, OP_CMPL_DOUBLE_finish
+ c.olt.d fcc0, fs1, fs0
+ li rTEMP, 1
+ bc1t fcc0, OP_CMPL_DOUBLE_finish
+ c.eq.d fcc0, fs0, fs1
+ li rTEMP, 0
+ bc1t fcc0, OP_CMPL_DOUBLE_finish
+ b OP_CMPL_DOUBLE_nan # all ordered compares false => NaN
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: mips/OP_CMPG_DOUBLE.S */
+/* File: mips/OP_CMPL_DOUBLE.S */
+ /*
+ * Compare two floating-point values. Puts 0, 1, or -1 into the
+ * destination register based on the results of the comparison.
+ *
+ * Provide a "naninst" instruction that puts 1 or -1 into a1 depending
+ * on what value we'd like to return when one of the operands is NaN.
+ *
+ * See OP_CMPL_FLOAT for an explanation.
+ *
+ * For: cmpl-double, cmpg-double
+ */
+ /* op vAA, vBB, vCC */
+
+ FETCH(a0, 1) # a0 <- CCBB
+ and rOBJ, a0, 255 # rOBJ <- BB
+ srl rBIX, a0, 8 # rBIX <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[BB]
+ EAS2(rBIX, rFP, rBIX) # rBIX <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, rBIX) # a2/a3 <- vCC/vCC+1
+ JAL(__eqdf2) # v0 <- 0 iff (vBB == vCC), per o32 ABI
+ li rTEMP, 0
+ beqz v0, OP_CMPG_DOUBLE_finish
+
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, rBIX) # a2/a3 <- vCC/vCC+1
+ JAL(__ltdf2) # v0 < 0 iff (vBB < vCC)
+ li rTEMP, -1
+ bltz v0, OP_CMPG_DOUBLE_finish
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vBB/vBB+1
+ b OP_CMPG_DOUBLE_continue
+#else
+ LOAD64_F(fs0, fs0f, rOBJ)
+ LOAD64_F(fs1, fs1f, rBIX)
+ c.olt.d fcc0, fs0, fs1 # ordered compare: false on NaN
+ li rTEMP, -1
+ bc1t fcc0, OP_CMPG_DOUBLE_finish
+ c.olt.d fcc0, fs1, fs0
+ li rTEMP, 1
+ bc1t fcc0, OP_CMPG_DOUBLE_finish
+ c.eq.d fcc0, fs0, fs1
+ li rTEMP, 0
+ bc1t fcc0, OP_CMPG_DOUBLE_finish
+ b OP_CMPG_DOUBLE_nan # all ordered compares false => NaN
+#endif
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CMP_LONG: /* 0x31 */
+/* File: mips/OP_CMP_LONG.S */
+ /*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * I think I can improve on the ARM code by the following observation
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+ * sgt t1, x.hi, y.hi; # (x.hi > y.hi) ? 1:0
+ * subu v0, t1, t0 # v0= -1:1:0 for [ < > = ]
+ */
+ /* cmp-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(a3, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, a3) # a2/a3 <- vCC/vCC+1
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ slt t0, a1, a3 # compare hi (signed)
+ sgt t1, a1, a3
+ subu v0, t1, t0 # v0 <- (-1, 1, 0)
+ bnez v0, .LOP_CMP_LONG_finish
+ # at this point x.hi==y.hi
+ sltu t0, a0, a2 # compare lo (unsigned)
+ sgtu t1, a0, a2
+ subu v0, t1, t0 # v0 <- (-1, 1, 0) for [< > =]
+
+.LOP_CMP_LONG_finish:
+ SET_VREG(v0, rOBJ) # vAA <- v0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_EQ: /* 0x32 */
+/* File: mips/OP_IF_EQ.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ bne a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(a1, 1) # a1<- branch offset, in code units
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a2, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a2, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+3:
+ bnez a0, common_updateProfile
+#else
+ bgez a2, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_NE: /* 0x33 */
+/* File: mips/OP_IF_NE.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ beq a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(a1, 1) # a1<- branch offset, in code units
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a2, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a2, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+3:
+ bnez a0, common_updateProfile
+#else
+ bgez a2, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_LT: /* 0x34 */
+/* File: mips/OP_IF_LT.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ bge a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(a1, 1) # a1<- branch offset, in code units
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a2, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a2, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+3:
+ bnez a0, common_updateProfile
+#else
+ bgez a2, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_GE: /* 0x35 */
+/* File: mips/OP_IF_GE.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ blt a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(a1, 1) # a1<- branch offset, in code units
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a2, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a2, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+3:
+ bnez a0, common_updateProfile
+#else
+ bgez a2, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_GT: /* 0x36 */
+/* File: mips/OP_IF_GT.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ ble a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(a1, 1) # a1<- branch offset, in code units
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a2, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a2, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+3:
+ bnez a0, common_updateProfile
+#else
+ bgez a2, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_LE: /* 0x37 */
+/* File: mips/OP_IF_LE.S */
+/* File: mips/bincmp.S */
+ /*
+ * Generic two-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+ /* if-cmp vA, vB, +CCCC */
+ GET_OPA4(a0) # a0 <- A+
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a3, a1) # a3 <- vB
+ GET_VREG(a2, a0) # a2 <- vA
+ bgt a2, a3, 1f # branch to 1 if comparison failed
+ FETCH_S(a1, 1) # a1<- branch offset, in code units
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a2, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a2) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a2, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+3:
+ bnez a0, common_updateProfile
+#else
+ bgez a2, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_EQZ: /* 0x38 */
+/* File: mips/OP_IF_EQZ.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(a1, 1) # a1 <- branch offset, in code units
+ bne a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a1, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a1, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+3:
+ bnez a0, common_updateProfile # test for JIT off at target
+#else
+ bgez a1, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_NEZ: /* 0x39 */
+/* File: mips/OP_IF_NEZ.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(a1, 1) # a1 <- branch offset, in code units
+ beq a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a1, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a1, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+3:
+ bnez a0, common_updateProfile # test for JIT off at target
+#else
+ bgez a1, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_LTZ: /* 0x3a */
+/* File: mips/OP_IF_LTZ.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(a1, 1) # a1 <- branch offset, in code units
+ bge a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a1, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a1, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+3:
+ bnez a0, common_updateProfile # test for JIT off at target
+#else
+ bgez a1, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_GEZ: /* 0x3b */
+/* File: mips/OP_IF_GEZ.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(a1, 1) # a1 <- branch offset, in code units
+ blt a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a1, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a1, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+3:
+ bnez a0, common_updateProfile # test for JIT off at target
+#else
+ bgez a1, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_GTZ: /* 0x3c */
+/* File: mips/OP_IF_GTZ.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(a1, 1) # a1 <- branch offset, in code units
+ ble a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a1, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a1, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+3:
+ bnez a0, common_updateProfile # test for JIT off at target
+#else
+ bgez a1, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IF_LEZ: /* 0x3d */
+/* File: mips/OP_IF_LEZ.S */
+/* File: mips/zcmp.S */
+ /*
+ * Generic one-operand compare-and-branch operation. Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+ /* if-cmp vAA, +BBBB */
+ GET_OPA(a0) # a0 <- AA
+ GET_VREG(a2, a0) # a2 <- vAA
+ FETCH_S(a1, 1) # a1 <- branch offset, in code units
+ bgt a2, zero, 1f # branch to 1 if comparison failed
+ b 2f
+1:
+ li a1, 2 # a1 <- 2 (not-taken offset, in code units)
+2:
+ addu a1, a1, a1 # convert to bytes
+ FETCH_ADVANCE_INST_RB(a1) # update rPC, load rINST
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF)
+ bgez a1, 3f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+3:
+ bnez a0, common_updateProfile # test for JIT off at target
+#else
+ bgez a1, 4f # forward branch? skip the refresh
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh table base
+4:
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3E: /* 0x3e */
+/* File: mips/OP_UNUSED_3E.S */
+/* File: mips/unused.S */
+ BAL(common_abort) # unassigned opcode: abort the VM
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3F: /* 0x3f */
+/* File: mips/OP_UNUSED_3F.S */
+/* File: mips/unused.S */
+ BAL(common_abort) # unassigned opcode: abort the VM
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_40: /* 0x40 */
+/* File: mips/OP_UNUSED_40.S */
+/* File: mips/unused.S */
+ BAL(common_abort) # unassigned opcode: abort the VM
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_41: /* 0x41 */
+/* File: mips/OP_UNUSED_41.S */
+/* File: mips/unused.S */
+ BAL(common_abort) # unassigned opcode: abort the VM
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_42: /* 0x42 */
+/* File: mips/OP_UNUSED_42.S */
+/* File: mips/unused.S */
+ BAL(common_abort) # unassigned opcode: abort the VM
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_43: /* 0x43 */
+/* File: mips/OP_UNUSED_43.S */
+/* File: mips/unused.S */
+ BAL(common_abort) # unassigned opcode: abort the VM
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AGET: /* 0x44 */
+/* File: mips/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_C(a3, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ .if 2 # generated template residue: nonzero shift => scaled index
+ EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lw a2, offArrayObject_contents(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AGET_WIDE: /* 0x45 */
+/* File: mips/OP_AGET_WIDE.S */
+ /*
+ * Array get, 64 bits. vAA <- vBB[vCC].
+ *
+ * Arrays of long/double are 64-bit aligned.
+ */
+ /* aget-wide vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ EAS3(a0, a0, a1) # a0 <- arrayObj + index*8 (wide elements)
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+
+.LOP_AGET_WIDE_finish:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64_off(a2, a3, a0, offArrayObject_contents) # a2/a3 <- vBB[vCC] (64-bit)
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a2, a3, rOBJ) # vAA/vAA+1 <- a2/a3
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AGET_OBJECT: /* 0x46 */
+/* File: mips/OP_AGET_OBJECT.S */
+/* File: mips/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_C(a3, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ .if 2 # generated template residue: nonzero shift => scaled index
+ EASN(a0, a0, a1, 2) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lw a2, offArrayObject_contents(a0) # a2 <- vBB[vCC]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: mips/OP_AGET_BOOLEAN.S */
+/* File: mips/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_C(a3, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ .if 0 # generated template residue: zero shift => unscaled index
+ EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lbu a2, offArrayObject_contents(a0) # a2 <- vBB[vCC] (zero-extended byte)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AGET_BYTE: /* 0x48 */
+/* File: mips/OP_AGET_BYTE.S */
+/* File: mips/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_C(a3, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ .if 0 # generated template residue: zero shift => unscaled index
+ EASN(a0, a0, a1, 0) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lb a2, offArrayObject_contents(a0) # a2 <- vBB[vCC] (sign-extended byte)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AGET_CHAR: /* 0x49 */
+/* File: mips/OP_AGET_CHAR.S */
+/* File: mips/OP_AGET.S */
+ /*
+ * Array get, 32 bits or less. vAA <- vBB[vCC].
+ *
+ * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+ * instructions. We use a pair of FETCH_Bs instead.
+ *
+ * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+ */
+ /* op vAA, vBB, vCC */
+ FETCH_B(a2, 1) # a2 <- BB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ FETCH_C(a3, 1) # a3 <- CC
+ GET_VREG(a0, a2) # a0 <- vBB (array object)
+ GET_VREG(a1, a3) # a1 <- vCC (requested index)
+ # null array object?
+ beqz a0, common_errNullObject # yes, bail
+ LOAD_base_offArrayObject_length(a3, a0) # a3 <- arrayObj->length
+ .if 1 # generated template residue: nonzero shift => scaled index
+ EASN(a0, a0, a1, 1) # a0 <- arrayObj + index*width
+ .else
+ addu a0, a0, a1
+ .endif
+ # a1 >= a3; compare unsigned index
+ bgeu a1, a3, common_errArrayIndex # index >= length, bail
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ lhu a2, offArrayObject_contents(a0) # a2 <- vBB[vCC] (zero-extended half)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a2, rOBJ, t0) # vAA <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AGET_SHORT: /* 0x4a */
+/* File: mips/OP_AGET_SHORT.S */
+/* File: mips/OP_AGET.S */
+    /*
+     * Array get, 32 bits or less.  vAA <- vBB[vCC].
+     *
+     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
+     * instructions.  We use a pair of FETCH_Bs instead.
+     *
+     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)                    #  a0 <- arrayObj + index*width (halfword elements)
+    .else
+    addu      a0, a0, a1
+    .endif
+    # a1 >= a3; compare unsigned index (bounds check happens before the load below)
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    lh        a2, offArrayObject_contents(a0) #  a2 <- vBB[vCC] (sign-extended short)
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT: /* 0x4b */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 2
+    EASN(a0, a0, a1, 2)                    #  a0 <- arrayObj + index*width (word elements)
+    .else
+    addu      a0, a0, a1
+    .endif
+    # unsigned index vs length; checked before the store below
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sw a2, offArrayObject_contents(a0)     #  vBB[vCC] <- a2
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_WIDE: /* 0x4c */
+/* File: mips/OP_APUT_WIDE.S */
+    /*
+     * Array put, 64 bits.  vBB[vCC] <- vAA.
+     *
+     * Arrays of long/double are 64-bit aligned, so a paired 64-bit store
+     * is safe.  (Comment inherited from the ARM version, which uses STRD.)
+     */
+    /* aput-wide vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t0)                            #  t0 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width (8-byte elements)
+    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
+    # compare unsigned index, length; checked before the store below
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    STORE64_off(a2, a3, a0, offArrayObject_contents) #  vBB[vCC] <- a2/a3
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_OBJECT: /* 0x4d */
+/* File: mips/OP_APUT_OBJECT.S */
+    /*
+     * Store an object into an array.  vBB[vCC] <- vAA.
+     *
+     * A null value can be stored without any checks; a non-null value
+     * branches to .LOP_APUT_OBJECT_checks (defined elsewhere) first.
+     */
+    /* op vAA, vBB, vCC */
+    FETCH(a0, 1)                           #  a0 <- CCBB
+    GET_OPA(t1)                            #  t1 <- AA
+    and       a2, a0, 255                  #  a2 <- BB
+    srl       a3, a0, 8                    #  a3 <- CC
+    GET_VREG(rINST, a2)                    #  rINST <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    GET_VREG(rBIX, t1)                     #  rBIX <- vAA
+    # null array object?
+    beqz      rINST, common_errNullObject  #  yes, bail
+
+    LOAD_base_offArrayObject_length(a3, rINST) #  a3 <- arrayObj->length
+    EAS2(rOBJ, rINST, a1)                  #  rOBJ <- arrayObj + index*width (4-byte refs)
+    # compare unsigned index, length
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    /*
+     * On entry:
+     *  rINST = vBB (arrayObj)
+     *  rBIX = vAA (obj)
+     *  rOBJ = offset into array (vBB + vCC * width)
+     */
+    bnez      rBIX, .LOP_APUT_OBJECT_checks #  storing non-null ref: do type/GC checks first
+.LOP_APUT_OBJECT_finish:
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sw rBIX, offArrayObject_contents(rOBJ) #  vBB[vCC] <- vAA
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: mips/OP_APUT_BOOLEAN.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)                    #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1                   #  a0 <- arrayObj + index (byte elements, no scaling)
+    .endif
+    # unsigned index vs length; checked before the store below
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sb a2, offArrayObject_contents(a0)     #  vBB[vCC] <- a2 (low 8 bits)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_BYTE: /* 0x4f */
+/* File: mips/OP_APUT_BYTE.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 0
+    EASN(a0, a0, a1, 0)                    #  a0 <- arrayObj + index*width
+    .else
+    addu      a0, a0, a1                   #  a0 <- arrayObj + index (byte elements, no scaling)
+    .endif
+    # unsigned index vs length; checked before the store below
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sb a2, offArrayObject_contents(a0)     #  vBB[vCC] <- a2 (low 8 bits)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_CHAR: /* 0x50 */
+/* File: mips/OP_APUT_CHAR.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)                    #  a0 <- arrayObj + index*width (halfword elements)
+    .else
+    addu      a0, a0, a1
+    .endif
+    # unsigned index vs length; checked before the store below
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sh a2, offArrayObject_contents(a0)     #  vBB[vCC] <- a2 (low 16 bits)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_APUT_SHORT: /* 0x51 */
+/* File: mips/OP_APUT_SHORT.S */
+/* File: mips/OP_APUT.S */
+    /*
+     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
+     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
+     */
+    /* op vAA, vBB, vCC */
+    FETCH_B(a2, 1)                         #  a2 <- BB
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    FETCH_C(a3, 1)                         #  a3 <- CC
+    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
+    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
+    # null array object?
+    beqz      a0, common_errNullObject     #  yes, bail
+    LOAD_base_offArrayObject_length(a3, a0) #  a3 <- arrayObj->length
+    .if 1
+    EASN(a0, a0, a1, 1)                    #  a0 <- arrayObj + index*width (halfword elements)
+    .else
+    addu      a0, a0, a1
+    .endif
+    # unsigned index vs length; checked before the store below
+    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    sh a2, offArrayObject_contents(a0)     #  vBB[vCC] <- a2 (low 16 bits)
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET: /* 0x52 */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_finish         #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IGET_finish
+    b         common_exceptionThrown       #  resolve failed: throw
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_WIDE: /* 0x53 */
+/* File: mips/OP_IGET_WIDE.S */
+    /*
+     * 64-bit instance field get.
+     */
+    # iget-wide vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_WIDE_finish    #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test return code
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IGET_WIDE_finish
+    b         common_exceptionThrown       #  resolve failed: throw
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_OBJECT: /* 0x54 */
+/* File: mips/OP_IGET_OBJECT.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_OBJECT_finish  #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IGET_OBJECT_finish
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: mips/OP_IGET_BOOLEAN.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_BOOLEAN_finish #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IGET_BOOLEAN_finish
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_BYTE: /* 0x56 */
+/* File: mips/OP_IGET_BYTE.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_BYTE_finish    #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IGET_BYTE_finish
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_CHAR: /* 0x57 */
+/* File: mips/OP_IGET_CHAR.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_CHAR_finish    #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IGET_CHAR_finish
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_SHORT: /* 0x58 */
+/* File: mips/OP_IGET_SHORT.S */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_SHORT_finish   #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IGET_SHORT_finish
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT: /* 0x59 */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_finish         #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IPUT_finish         #  yes, finish up
+    b         common_exceptionThrown       #  resolve failed: throw
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_WIDE: /* 0x5a */
+/* File: mips/OP_IPUT_WIDE.S */
+    /*
+     * 64-bit instance field put.
+     */
+    # iput-wide vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_WIDE_finish    #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IPUT_WIDE_finish    #  yes, finish up
+    b         common_exceptionThrown       #  resolve failed: throw
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_OBJECT: /* 0x5b */
+/* File: mips/OP_IPUT_OBJECT.S */
+    /*
+     * 32-bit instance field put.
+     *
+     * for: iput-object, iput-object-volatile
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_OBJECT_finish  #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IPUT_OBJECT_finish  #  yes, finish up
+    b         common_exceptionThrown       #  resolve failed: throw
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: mips/OP_IPUT_BOOLEAN.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_BOOLEAN_finish #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IPUT_BOOLEAN_finish #  yes, finish up
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_BYTE: /* 0x5d */
+/* File: mips/OP_IPUT_BYTE.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_BYTE_finish    #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IPUT_BYTE_finish    #  yes, finish up
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_CHAR: /* 0x5e */
+/* File: mips/OP_IPUT_CHAR.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_CHAR_finish    #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IPUT_CHAR_finish    #  yes, finish up
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_SHORT: /* 0x5f */
+/* File: mips/OP_IPUT_SHORT.S */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_SHORT_finish   #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0                       #  a0 <- field ptr for the finish path
+    bnez      v0, .LOP_IPUT_SHORT_finish   #  yes, finish up
+    b         common_exceptionThrown       #  resolve failed: throw
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET: /* 0x60 */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_finish         #  already resolved: fast path
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SGET_finish             #  resume
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_WIDE: /* 0x61 */
+/* File: mips/OP_SGET_WIDE.S */
+    /*
+     * 64-bit SGET handler.
+     */
+    # sget-wide vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_WIDE_finish    #  already resolved: fast path
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in v0.
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+
+    b         .LOP_SGET_WIDE_finish        #  resume
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_OBJECT: /* 0x62 */
+/* File: mips/OP_SGET_OBJECT.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_OBJECT_finish  #  already resolved: fast path
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SGET_OBJECT_finish      #  resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: mips/OP_SGET_BOOLEAN.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_BOOLEAN_finish #  already resolved: fast path
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SGET_BOOLEAN_finish     #  resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_BYTE: /* 0x64 */
+/* File: mips/OP_SGET_BYTE.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_BYTE_finish    #  already resolved: fast path
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SGET_BYTE_finish        #  resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_CHAR: /* 0x65 */
+/* File: mips/OP_SGET_CHAR.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_CHAR_finish    #  already resolved: fast path
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SGET_CHAR_finish        #  resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_SHORT: /* 0x66 */
+/* File: mips/OP_SGET_SHORT.S */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_SHORT_finish   #  already resolved: fast path
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SGET_SHORT_finish       #  resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT: /* 0x67 */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_finish         #  resolved entry not null: fast path
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1: BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0                       #  a0 <- resolved field ptr, kept live below
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b         .LOP_SPUT_finish             #  resume
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_WIDE: /* 0x68 */
+/* File: mips/OP_SPUT_WIDE.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * 64-bit SPUT handler.
+ */
+ # sput-wide vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ GET_OPA(t0) # t0 <- AA
+ LOAD_eas2(a2, rBIX, a1) # a2 <- resolved StaticField ptr
+ EAS2(rOBJ, rFP, t0) # rOBJ<- &fp[AA]
+ # is resolved entry null?
+ beqz a2, .LOP_SPUT_WIDE_resolve # yes, do resolve
+.LOP_SPUT_WIDE_finish: # field ptr in a2, AA in rOBJ
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ .if 0
+ # volatile variant (template-selected; disabled in this expansion)
+ addu a2, offStaticField_value # a2<- pointer to data
+ JAL(dvmQuasiAtomicSwap64Sync) # stores a0/a1 into addr a2
+ .else
+ STORE64_off(a0, a1, a2, offStaticField_value) # field <- vAA/vAA+1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_OBJECT: /* 0x69 */
+/* File: mips/OP_SPUT_OBJECT.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput-object, sput-object-volatile
+ */
+ /* op vAA, field@BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_OBJECT_finish # already resolved (non-null)? store and finish
+
+ /* Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0 <- field ptr for the finish path
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_OBJECT_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: mips/OP_SPUT_BOOLEAN.S */
+/* File: mips/OP_SPUT.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_BOOLEAN_finish # already resolved (non-null)? store and finish
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0 <- field ptr for the finish path
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_BOOLEAN_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_BYTE: /* 0x6b */
+/* File: mips/OP_SPUT_BYTE.S */
+/* File: mips/OP_SPUT.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_BYTE_finish # already resolved (non-null)? store and finish
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0 <- field ptr for the finish path
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_BYTE_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_CHAR: /* 0x6c */
+/* File: mips/OP_SPUT_CHAR.S */
+/* File: mips/OP_SPUT.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_CHAR_finish # already resolved (non-null)? store and finish
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0 <- field ptr for the finish path
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_CHAR_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_SHORT: /* 0x6d */
+/* File: mips/OP_SPUT_SHORT.S */
+/* File: mips/OP_SPUT.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+ */
+ # op vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_SHORT_finish # already resolved (non-null)? store and finish
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0 <- field ptr for the finish path
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_SHORT_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: mips/OP_INVOKE_VIRTUAL.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ FETCH(rBIX, 2) # rBIX <- GFED or CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ .if (!0) # non-range expansion: keep only low nibble
+ and rBIX, rBIX, 15 # rBIX <- D (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, .LOP_INVOKE_VIRTUAL_continue # yes, continue on
+
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0 <- resolved method for continue path
+ # got null?
+ bnez v0, .LOP_INVOKE_VIRTUAL_continue # no, continue
+ b common_exceptionThrown # yes, handle exception
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_SUPER: /* 0x6f */
+/* File: mips/OP_INVOKE_SUPER.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(t0, 2) # t0 <- GFED or CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ .if (!0) # non-range expansion: keep only low nibble
+ and t0, t0, 15 # t0 <- D (or stays CCCC)
+ .endif
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ GET_VREG(rOBJ, t0) # rOBJ <- "this" ptr
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ # null "this"?
+ LOAD_rSELF_method(t1) # t1 <- current method
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ # cmp a0, 0; already resolved?
+ LOAD_base_offMethod_clazz(rBIX, t1) # rBIX <- method->clazz
+ EXPORT_PC() # must export for invoke
+ bnez a0, .LOP_INVOKE_SUPER_continue # resolved, continue on
+
+ move a0, rBIX # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0 <- resolved method for continue path
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .LOP_INVOKE_SUPER_continue
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: mips/OP_INVOKE_DIRECT.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoiding loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ FETCH(rBIX, 2) # rBIX <- GFED or CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+ .if (!0) # non-range expansion: keep only low nibble
+ and rBIX, rBIX, 15 # rBIX <- D (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ # already resolved?
+ bnez a0, 1f # resolved, call the function
+
+ lw a3, offThread_method(rSELF) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_DIRECT # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0 <- resolved method for invoke path
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+
+1:
+ bnez rOBJ, common_invokeMethodNoRange # a0=method, rOBJ="this"
+ b common_errNullObject # yes, throw exception
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_STATIC: /* 0x71 */
+/* File: mips/OP_INVOKE_STATIC.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ li rOBJ, 0 # null "this" in delay slot
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX<- &resolved_methodToCall
+#endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, common_invokeMethodNoRange # yes, continue on
+ b .LOP_INVOKE_STATIC_resolve
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: mips/OP_INVOKE_INTERFACE.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(a2, 2) # a2 <- FEDC or CCCC
+ FETCH(a1, 1) # a1 <- BBBB
+ .if (!0) # non-range expansion: keep only low nibble
+ and a2, 15 # a2 <- C (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, a2) # rOBJ <- first arg ("this")
+ LOAD_rSELF_methodClassDex(a3) # a3 <- methodClassDex
+ LOAD_rSELF_method(a2) # a2 <- method
+ # null obj?
+ beqz rOBJ, common_errNullObject # yes, fail
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- thisPtr->clazz
+ JAL(dvmFindInterfaceMethodInCache) # v0 <- call(class, ref, method, dex)
+ move a0, v0 # a0 <- found method for invoke path
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b common_invokeMethodNoRange # (a0=method, rOBJ="this")
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_73: /* 0x73 */
+/* File: mips/OP_UNUSED_73.S */
+/* File: mips/unused.S */
+ # unassigned opcode: executing it is a VM error
+ BAL(common_abort)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: mips/OP_INVOKE_VIRTUAL_RANGE.S */
+/* File: mips/OP_INVOKE_VIRTUAL.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a virtual method call.
+ *
+ * for: invoke-virtual, invoke-virtual/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ FETCH(rBIX, 2) # rBIX <- GFED or CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ .if (!1) # range expansion: masking elided
+ and rBIX, rBIX, 15 # rBIX <- D (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, .LOP_INVOKE_VIRTUAL_RANGE_continue # yes, continue on
+
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0 <- resolved method for continue path
+ # got null?
+ bnez v0, .LOP_INVOKE_VIRTUAL_RANGE_continue # no, continue
+ b common_exceptionThrown # yes, handle exception
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: mips/OP_INVOKE_SUPER_RANGE.S */
+/* File: mips/OP_INVOKE_SUPER.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a "super" method call.
+ *
+ * for: invoke-super, invoke-super/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(t0, 2) # t0 <- GFED or CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ .if (!1) # range expansion: masking elided
+ and t0, t0, 15 # t0 <- D (or stays CCCC)
+ .endif
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ GET_VREG(rOBJ, t0) # rOBJ <- "this" ptr
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ # null "this"?
+ LOAD_rSELF_method(t1) # t1 <- current method
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ # cmp a0, 0; already resolved?
+ LOAD_base_offMethod_clazz(rBIX, t1) # rBIX <- method->clazz
+ EXPORT_PC() # must export for invoke
+ bnez a0, .LOP_INVOKE_SUPER_RANGE_continue # resolved, continue on
+
+ move a0, rBIX # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0 <- resolved method for continue path
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .LOP_INVOKE_SUPER_RANGE_continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: mips/OP_INVOKE_DIRECT_RANGE.S */
+/* File: mips/OP_INVOKE_DIRECT.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoiding loading the first arg twice.)
+ *
+ * for: invoke-direct, invoke-direct/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ FETCH(rBIX, 2) # rBIX <- GFED or CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+ .if (!1) # range expansion: masking elided
+ and rBIX, rBIX, 15 # rBIX <- D (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ # already resolved?
+ bnez a0, 1f # resolved, call the function
+
+ lw a3, offThread_method(rSELF) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_DIRECT # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0 <- resolved method for invoke path
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+
+1:
+ bnez rOBJ, common_invokeMethodRange # a0=method, rOBJ="this"
+ b common_errNullObject # yes, throw exception
+
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: mips/OP_INVOKE_STATIC_RANGE.S */
+/* File: mips/OP_INVOKE_STATIC.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle a static method call.
+ *
+ * for: invoke-static, invoke-static/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ li rOBJ, 0 # null "this" in delay slot
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX<- &resolved_methodToCall
+#endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, common_invokeMethodRange # yes, continue on
+ b .LOP_INVOKE_STATIC_RANGE_resolve
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: mips/OP_INVOKE_INTERFACE_RANGE.S */
+/* File: mips/OP_INVOKE_INTERFACE.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Handle an interface method call.
+ *
+ * for: invoke-interface, invoke-interface/range
+ */
+ /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+ /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
+ FETCH(a2, 2) # a2 <- FEDC or CCCC
+ FETCH(a1, 1) # a1 <- BBBB
+ .if (!1) # range expansion: masking elided
+ and a2, 15 # a2 <- C (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, a2) # rOBJ <- first arg ("this")
+ LOAD_rSELF_methodClassDex(a3) # a3 <- methodClassDex
+ LOAD_rSELF_method(a2) # a2 <- method
+ # null obj?
+ beqz rOBJ, common_errNullObject # yes, fail
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- thisPtr->clazz
+ JAL(dvmFindInterfaceMethodInCache) # v0 <- call(class, ref, method, dex)
+ move a0, v0 # a0 <- found method for invoke path
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b common_invokeMethodRange # (a0=method, rOBJ="this")
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_79: /* 0x79 */
+/* File: mips/OP_UNUSED_79.S */
+/* File: mips/unused.S */
+ # unassigned opcode: executing it is a VM error
+ BAL(common_abort)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_7A: /* 0x7a */
+/* File: mips/OP_UNUSED_7A.S */
+/* File: mips/unused.S */
+ # unassigned opcode: executing it is a VM error
+ BAL(common_abort)
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEG_INT: /* 0x7b */
+/* File: mips/OP_NEG_INT.S */
+/* File: mips/unop.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ negu a0, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NOT_INT: /* 0x7c */
+/* File: mips/OP_NOT_INT.S */
+/* File: mips/unop.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ not a0, a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEG_LONG: /* 0x7d */
+/* File: mips/OP_NEG_LONG.S */
+/* File: mips/unopWide.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(t1) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(rOBJ, rFP, t1) # rOBJ <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ negu v0, a0 # optional op
+ # 64-bit negate: negate both halves, then borrow 1 from the high word if the low result is non-zero
+ negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0 # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NOT_LONG: /* 0x7e */
+/* File: mips/OP_NOT_LONG.S */
+/* File: mips/unopWide.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(t1) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(rOBJ, rFP, t1) # rOBJ <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ not a0, a0 # optional op
+ not a1, a1 # a0/a1 <- op, a2-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEG_FLOAT: /* 0x7f */
+/* File: mips/OP_NEG_FLOAT.S */
+/* File: mips/unop.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ addu a0, a0, 0x80000000 # a0 <- op, a0-a3 changed (flip IEEE-754 sign bit)
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEG_DOUBLE: /* 0x80 */
+/* File: mips/OP_NEG_DOUBLE.S */
+/* File: mips/unopWide.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be MIPS instruction or a function call.
+ *
+ * For: neg-long, not-long, neg-double, long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(t1) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(rOBJ, rFP, t1) # rOBJ <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- vAA
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ addu a1, a1, 0x80000000 # a0/a1 <- op, a2-a3 changed (flip sign bit in high word)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INT_TO_LONG: /* 0x81 */
+/* File: mips/OP_INT_TO_LONG.S */
+/* File: mips/unopWider.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-long, int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(t1) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, a3) # a0 <- vB
+ EAS2(rOBJ, rFP, t1) # rOBJ <- &fp[A]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ sra a1, a0, 31 # result <- op, a0-a3 changed (a1 = sign extension of a0)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vA/vA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: mips/OP_INT_TO_FLOAT.S */
+/* File: mips/unflop.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # t0 <- A+
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, a3) # a0 <- vB
+#else
+ GET_VREG_F(fa0, a3)
+#endif
+ # optional op
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifdef SOFT_FLOAT
+ JAL(__floatsisf) # a0 <- op, a0-a3 changed
+
+.LOP_INT_TO_FLOAT_set_vreg:
+ SET_VREG(v0, rOBJ) # vAA <- result0
+#else
+ cvt.s.w fv0, fa0
+
+.LOP_INT_TO_FLOAT_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ)
+#endif
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GOTO_OPCODE(t1) # jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: mips/OP_INT_TO_DOUBLE.S */
+/* File: mips/unflopWider.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, a3) # a0 <- vB
+#else
+ GET_VREG_F(fa0, a3)
+#endif
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__floatsidf) # result <- op, a0-a3 changed
+
+.LOP_INT_TO_DOUBLE_set_vreg:
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vA/vA+1 <- a0/a1
+#else
+ cvt.d.w fv0, fa0
+
+.LOP_INT_TO_DOUBLE_set_vreg:
+ STORE64_F(fv0, fv0f, rOBJ) # vA/vA+1 <- a0/a1
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_LONG_TO_INT: /* 0x84 */
+/* File: mips/OP_LONG_TO_INT.S */
+ /* NOTE(review): machine-generated output (gen-mterp.py); edit the mips/ template and regenerate, do not patch here. */
+ /* long-to-int: copy only the low 32 bits of the long pair into vA */
+ GET_OPB(a1) # a1 <- B from 15:12
+ GET_OPA4(a0) # a0 <- A from 11:8
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifdef HAVE_BIG_ENDIAN
+ addu a1, a1, 1 # NOTE(review): presumably the low word lives in the second vreg on BE -- confirm vreg-pair layout
+#endif
+ GET_VREG(a2, a1) # a2 <- fp[B]
+ GET_INST_OPCODE(t0) # t0 <- opcode from rINST
+ SET_VREG_GOTO(a2, a0, t0) # fp[A] <- a2
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: mips/OP_LONG_TO_FLOAT.S */
+/* File: mips/unopNarrower.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter, except for
+ * long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # t1 <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a3) # a0/a1 <- vB/vB+1
+#else
+ LOAD64(rARG0, rARG1, a3) # long source goes in integer arg regs even with HW FP (see header note)
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__floatdisf) # a0 <- op, a0-a3 changed
+
+.LOP_LONG_TO_FLOAT_set_vreg:
+ SET_VREG(v0, rOBJ) # vA <- result0
+#else
+ JAL(__floatdisf)
+
+.LOP_LONG_TO_FLOAT_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: mips/OP_LONG_TO_DOUBLE.S */
+/* File: mips/unflopWide.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # t1 <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a3) # a0/a1 <- vAA
+#else
+ LOAD64(rARG0, rARG1, a3) # long source goes in integer arg regs in both builds
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ JAL(__floatdidf) # a0/a1 <- op, a2-a3 changed
+
+.LOP_LONG_TO_DOUBLE_set_vreg:
+#ifdef SOFT_FLOAT
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vAA <- a0/a1
+#else
+ STORE64_F(fv0, fv0f, rOBJ) # vAA <- a0/a1
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: mips/OP_FLOAT_TO_INT.S */
+/* File: mips/unflop.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: int-to-float, float-to-int
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # t0 <- A+
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, a3) # a0 <- vB
+#else
+ GET_VREG_F(fa0, a3)
+#endif
+ # optional op
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+#ifdef SOFT_FLOAT
+ b f2i_doconv # a0 <- op, a0-a3 changed
+
+.LOP_FLOAT_TO_INT_set_vreg: # NOTE(review): presumably f2i_doconv branches back here after clamping -- confirm against f2i_doconv
+ SET_VREG(v0, rOBJ) # vAA <- result0
+#else
+ b f2i_doconv
+
+.LOP_FLOAT_TO_INT_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ)
+#endif
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ GOTO_OPCODE(t1) # jump to next instruction
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: mips/OP_FLOAT_TO_LONG.S */
+/* File: mips/unflopWider.S */
+ /* NOTE(review): machine-generated from the mips/ template above (gen-mterp.py); edit the template and regenerate, do not patch this output. */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, a3) # a0 <- vB
+#else
+ GET_VREG_F(fa0, a3)
+#endif
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ b f2l_doconv # result <- op, a0-a3 changed
+
+.LOP_FLOAT_TO_LONG_set_vreg: # NOTE(review): presumably f2l_doconv branches back here after clamping -- confirm against f2l_doconv
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vA/vA+1 <- a0/a1
+#else
+ b f2l_doconv
+
+.LOP_FLOAT_TO_LONG_set_vreg:
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vA/vA+1 <- a0/a1
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: mips/OP_FLOAT_TO_DOUBLE.S */
+/* File: mips/unflopWider.S */
+ /*
+ * Generic 32bit-to-64bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0", where
+ * "result" is a 64-bit quantity in a0/a1.
+ *
+ * For: int-to-double, float-to-long, float-to-double
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, a3) # a0 <- vB
+#else
+ GET_VREG_F(fa0, a3)
+#endif
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__extendsfdf2) # result <- op, a0-a3 changed
+
+.LOP_FLOAT_TO_DOUBLE_set_vreg:
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vA/vA+1 <- a0/a1
+#else
+ cvt.d.s fv0, fa0
+
+.LOP_FLOAT_TO_DOUBLE_set_vreg:
+ STORE64_F(fv0, fv0f, rOBJ) # vA/vA+1 <- a0/a1
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: mips/OP_DOUBLE_TO_INT.S */
+/* File: mips/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter, except for
+ * long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # t1 <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a3) # a0/a1 <- vB/vB+1
+#else
+ LOAD64_F(fa0, fa0f, a3)
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ b d2i_doconv # a0 <- op, a0-a3 changed
+
+.LOP_DOUBLE_TO_INT_set_vreg:
+ SET_VREG(v0, rOBJ) # vA <- result0
+#else
+ b d2i_doconv
+
+.LOP_DOUBLE_TO_INT_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+/*
+ * Convert the double in a0/a1 to an int in a0.
+ *
+ * We have to clip values to int min/max per the specification. The
+ * expected common case is a "reasonable" value that converts directly
+ * to modest integer. The EABI convert function isn't doing this for us.
+ * Use rBIX / rTEMP as global to hold arguments (they are not bound to a global var)
+ */
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: mips/OP_DOUBLE_TO_LONG.S */
+/* File: mips/unflopWide.S */
+ /*
+ * Generic 64-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0/a1".
+ * This could be a MIPS instruction or a function call.
+ *
+ * long-to-double, double-to-long
+ */
+ /* unop vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # t1 <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a3) # a0/a1 <- vAA
+#else
+ LOAD64_F(fa0, fa0f, a3)
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ b d2l_doconv # a0/a1 <- op, a2-a3 changed
+
+.LOP_DOUBLE_TO_LONG_set_vreg:
+#ifdef SOFT_FLOAT
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vAA <- a0/a1
+#else
+ STORE64(rRESULT0, rRESULT1, rOBJ) # vAA <- a0/a1
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: mips/OP_DOUBLE_TO_FLOAT.S */
+/* File: mips/unopNarrower.S */
+ /*
+ * Generic 64bit-to-32bit unary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = op a0/a1", where
+ * "result" is a 32-bit quantity in a0.
+ *
+ * For: long-to-float, double-to-int, double-to-float
+ * If hard floating point support is available, use fa0 as the parameter, except for
+ * long-to-float opcode.
+ * (This would work for long-to-int, but that instruction is actually
+ * an exact match for OP_MOVE.)
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(rOBJ) # t1 <- A+
+ EAS2(a3, rFP, a3) # a3 <- &fp[B]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a3) # a0/a1 <- vB/vB+1
+#else
+ LOAD64_F(fa0, fa0f, a3)
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__truncdfsf2) # a0 <- op, a0-a3 changed
+
+.LOP_DOUBLE_TO_FLOAT_set_vreg:
+ SET_VREG(v0, rOBJ) # vA <- result0
+#else
+ cvt.s.d fv0, fa0
+
+.LOP_DOUBLE_TO_FLOAT_set_vreg_f:
+ SET_VREG_F(fv0, rOBJ) # vA <- result0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-11 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INT_TO_BYTE: /* 0x8d */
+/* File: mips/OP_INT_TO_BYTE.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sll a0, a0, 24 # optional op
+ sra a0, a0, 24 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INT_TO_CHAR: /* 0x8e */
+/* File: mips/OP_INT_TO_CHAR.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+ and a0, 0xffff # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INT_TO_SHORT: /* 0x8f */
+/* File: mips/OP_INT_TO_SHORT.S */
+/* File: mips/unop.S */
+ /*
+ * Generic 32-bit unary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = op a0".
+ * This could be a MIPS instruction or a function call.
+ *
+ * for: neg-int, not-int, neg-float, int-to-float, float-to-int,
+ * int-to-byte, int-to-char, int-to-short
+ */
+ /* unop vA, vB */
+ GET_OPB(a3) # a3 <- B
+ GET_OPA4(t0) # t0 <- A+
+ GET_VREG(a0, a3) # a0 <- vB
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ sll a0, 16 # optional op
+ sra a0, 16 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ SET_VREG_GOTO(a0, t0, t1) # vAA <- result0
+ /* 9-10 instructions */
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_INT: /* 0x90 */
+/* File: mips/OP_ADD_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_INT: /* 0x91 */
+/* File: mips/OP_SUB_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ subu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_INT: /* 0x92 */
+/* File: mips/OP_MUL_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_INT: /* 0x93 */
+/* File: mips/OP_DIV_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ div zero, a0, a1; mflo a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_REM_INT: /* 0x94 */
+/* File: mips/OP_REM_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ div zero, a0, a1; mfhi a0 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AND_INT: /* 0x95 */
+/* File: mips/OP_AND_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_OR_INT: /* 0x96 */
+/* File: mips/OP_OR_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_XOR_INT: /* 0x97 */
+/* File: mips/OP_XOR_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHL_INT: /* 0x98 */
+/* File: mips/OP_SHL_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ and a1, a1, 31 # optional op
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHR_INT: /* 0x99 */
+/* File: mips/OP_SHR_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ and a1, a1, 31 # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_USHR_INT: /* 0x9a */
+/* File: mips/OP_USHR_INT.S */
+/* File: mips/binop.S */
+ /*
+ * Generic 32-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0 op a1".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus. Note that we
+ * *don't* check for (INT_MIN / -1) here, because the ARM math lib
+ * handles it correctly.
+ *
+ * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
+ * xor-int, shl-int, shr-int, ushr-int
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ and a1, a1, 31 # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_LONG: /* 0x9b */
+/* File: mips/OP_ADD_LONG.S */
+/*
+ * The compiler generates the following sequence for
+ * [v1 v0] = [a1 a0] + [a3 a2];
+ * addu v0,a2,a0
+ * addu a1,a3,a1
+ * sltu v1,v0,a2
+ * addu v1,v1,a1
+ */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ addu v0, a2, a0 # optional op
+ addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_LONG: /* 0x9c */
+/* File: mips/OP_SUB_LONG.S */
+/*
+ * For little endian the code sequence looks as follows:
+ * subu v0,a0,a2
+ * subu v1,a1,a3
+ * sltu a0,a0,v0
+ * subu v1,v1,a0
+ */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ subu v0, a0, a2 # optional op
+ subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_LONG: /* 0x9d */
+/* File: mips/OP_MUL_LONG.S */
+ /*
+ * Signed 64-bit integer multiply.
+ * a1 a0
+ * x a3 a2
+ * -------------
+ * a2a1 a2a0
+ * a3a0
+ * a3a1 (<= unused)
+ * ---------------
+ * v1 v0
+ */
+ /* mul-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ and t0, a0, 255 # a2 <- BB
+ srl t1, a0, 8 # a3 <- CC
+ EAS2(t0, rFP, t0) # t0 <- &fp[BB]
+ LOAD64(a0, a1, t0) # a0/a1 <- vBB/vBB+1
+
+ EAS2(t1, rFP, t1) # t0 <- &fp[CC]
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+
+ mul v1, a3, a0 # v1= a3a0
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+ mul t0, a2, a1 # t0= a2a1
+ addu v1, v1, t1 # v1+= hi(a2a0)
+ addu v1, v1, t0 # v1= a3a0 + a2a1;
+
+ GET_OPA(a0) # a0 <- AA
+ EAS2(a0, rFP, a0) # a0 <- &fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ b .LOP_MUL_LONG_finish
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_LONG: /* 0x9e */
+/* File: mips/OP_DIV_LONG.S */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ JAL(__divdi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+#else
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a1, a0, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a3, a2, t1) # a2/a3 <- vCC/vCC+1
+ .if 1
+ or t0, a3, a2 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ JAL(__divdi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v1, v0, rOBJ) # vAA/vAA+1 <- v1/v0
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_REM_LONG: /* 0x9f */
+/* File: mips/OP_REM_LONG.S */
+/* ldivmod returns quotient in a0/a1 and remainder in a2/a3 */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ JAL(__moddi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+#else
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # a3 <- &fp[CC]
+ LOAD64(a1, a0, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a3, a2, t1) # a2/a3 <- vCC/vCC+1
+ .if 1
+ or t0, a3, a2 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ # optional op
+ JAL(__moddi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v1, v0, rOBJ) # vAA/vAA+1 <- v1/v0
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AND_LONG: /* 0xa0 */
+/* File: mips/OP_AND_LONG.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ and a0, a0, a2 # optional op: low word of 64-bit AND
+ and a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_OR_LONG: /* 0xa1 */
+/* File: mips/OP_OR_LONG.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ or a0, a0, a2 # optional op: low word of 64-bit OR
+ or a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_XOR_LONG: /* 0xa2 */
+/* File: mips/OP_XOR_LONG.S */
+/* File: mips/binopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # rOBJ <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+ LOAD64(a0, a1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(a2, a3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ xor a0, a0, a2 # optional op: low word of 64-bit XOR
+ xor a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHL_LONG: /* 0xa3 */
+/* File: mips/OP_SHL_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shl-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t2) # t2 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC (shift distance)
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+
+ EAS2(t2, rFP, t2) # t2 <- &fp[AA]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+ andi a2, 0x20 # shift <- shift & 0x20 (bit 5 selects word swap)
+ movn v1, v0, a2 # rhi<- rlo (if shift&0x20)
+ movn v0, zero, a2 # rlo<- 0 (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, t2) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHR_LONG: /* 0xa4 */
+/* File: mips/OP_SHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* shr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t3) # t3 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC (shift distance)
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+ EAS2(t3, rFP, t3) # t3 <- &fp[AA]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ sra a3, a1, 31 # a3<- sign(ah)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20 (bit 5 selects word swap)
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, a3, a2 # rhi<- sign(ahi) (if shift&0x20)
+
+ STORE64(v0, v1, t3) # vAA/vAA+1 <- v0/v1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_USHR_LONG: /* 0xa5 */
+/* File: mips/OP_USHR_LONG.S */
+ /*
+ * Long integer shift. This is different from the generic 32/64-bit
+ * binary operations because vAA/vBB are 64-bit but vCC (the shift
+ * distance) is 32-bit. Also, Dalvik requires us to mask off the low
+ * 6 bits of the shift distance.
+ */
+ /* ushr-long vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(t0) # t0 <- AA
+ and a3, a0, 255 # a3 <- BB
+ srl a0, a0, 8 # a0 <- CC
+ EAS2(a3, rFP, a3) # a3 <- &fp[BB]
+ GET_VREG(a2, a0) # a2 <- vCC (shift distance)
+ LOAD64(a0, a1, a3) # a0/a1 <- vBB/vBB+1
+ EAS2(rOBJ, rFP, t0) # rOBJ <- &fp[AA]
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31), zero-fill (unsigned)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20 (bit 5 selects word swap)
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, zero, a2 # rhi<- 0 (if shift&0x20)
+
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_FLOAT: /* 0xa6 */
+/* File: mips/OP_ADD_FLOAT.S */
+/* File: mips/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+#ifdef SOFT_FLOAT
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa1, a3) # fa1 <- vCC
+ GET_VREG_F(fa0, a2) # fa0 <- vBB
+
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1 # condition bit and comparison with 0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__addsf3) # v0 = result
+ SET_VREG(v0, rOBJ) # vAA <- v0
+#else
+ add.s fv0, fa0, fa1 # fv0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_FLOAT: /* 0xa7 */
+/* File: mips/OP_SUB_FLOAT.S */
+/* File: mips/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+#ifdef SOFT_FLOAT
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa1, a3) # fa1 <- vCC
+ GET_VREG_F(fa0, a2) # fa0 <- vBB
+
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1 # condition bit and comparison with 0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__subsf3) # v0 = result
+ SET_VREG(v0, rOBJ) # vAA <- v0
+#else
+ sub.s fv0, fa0, fa1 # fv0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_FLOAT: /* 0xa8 */
+/* File: mips/OP_MUL_FLOAT.S */
+/* File: mips/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+#ifdef SOFT_FLOAT
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa1, a3) # fa1 <- vCC
+ GET_VREG_F(fa0, a2) # fa0 <- vBB
+
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1 # condition bit and comparison with 0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__mulsf3) # v0 = result
+ SET_VREG(v0, rOBJ) # vAA <- v0
+#else
+ mul.s fv0, fa0, fa1 # fv0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_FLOAT: /* 0xa9 */
+/* File: mips/OP_DIV_FLOAT.S */
+/* File: mips/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+#ifdef SOFT_FLOAT
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa1, a3) # fa1 <- vCC
+ GET_VREG_F(fa0, a2) # fa0 <- vBB
+
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1 # condition bit and comparison with 0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__divsf3) # v0 = result
+ SET_VREG(v0, rOBJ) # vAA <- v0
+#else
+ div.s fv0, fa0, fa1 # fv0 = result
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_REM_FLOAT: /* 0xaa */
+/* File: mips/OP_REM_FLOAT.S */
+/* File: mips/binflop.S */
+ /*
+ * Generic 32-bit binary float operation.
+ *
+ * For: add-fp, sub-fp, mul-fp, div-fp
+ */
+
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ srl a3, a0, 8 # a3 <- CC
+ and a2, a0, 255 # a2 <- BB
+#ifdef SOFT_FLOAT
+ GET_VREG(a1, a3) # a1 <- vCC
+ GET_VREG(a0, a2) # a0 <- vBB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa1, a3) # fa1 <- vCC
+ GET_VREG_F(fa0, a2) # fa0 <- vBB
+
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1 # condition bit and comparison with 0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(fmodf) # v0 = result
+ SET_VREG(v0, rOBJ) # vAA <- v0
+#else
+ JAL(fmodf) # fv0 = result (see SET_VREG_F below)
+ SET_VREG_F(fv0, rOBJ) # vAA <- fv0
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 11-14 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_DOUBLE: /* 0xab */
+/* File: mips/OP_ADD_DOUBLE.S */
+/* File: mips/binflopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, a2) # fa0/fa0f <- vBB/vBB+1
+ LOAD64_F(fa1, fa1f, t1) # fa1/fa1f <- vCC/vCC+1
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__adddf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ add.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_DOUBLE: /* 0xac */
+/* File: mips/OP_SUB_DOUBLE.S */
+/* File: mips/binflopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, a2) # fa0/fa0f <- vBB/vBB+1
+ LOAD64_F(fa1, fa1f, t1) # fa1/fa1f <- vCC/vCC+1
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__subdf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ sub.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_DOUBLE: /* 0xad */
+/* File: mips/OP_MUL_DOUBLE.S */
+/* File: mips/binflopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, a2) # fa0/fa0f <- vBB/vBB+1
+ LOAD64_F(fa1, fa1f, t1) # fa1/fa1f <- vCC/vCC+1
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__muldf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ mul.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_DOUBLE: /* 0xae */
+/* File: mips/OP_DIV_DOUBLE.S */
+/* File: mips/binflopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, a2) # fa0/fa0f <- vBB/vBB+1
+ LOAD64_F(fa1, fa1f, t1) # fa1/fa1f <- vCC/vCC+1
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__divdf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ div.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_REM_DOUBLE: /* 0xaf */
+/* File: mips/OP_REM_DOUBLE.S */
+/* File: mips/binflopWide.S */
+ /*
+ * Generic 64-bit binary operation. Provide an "instr" line that
+ * specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
+ * xor-long, add-double, sub-double, mul-double, div-double,
+ * rem-double
+ *
+ * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
+ */
+ /* binop vAA, vBB, vCC */
+ FETCH(a0, 1) # a0 <- CCBB
+ GET_OPA(rOBJ) # s5 <- AA
+ and a2, a0, 255 # a2 <- BB
+ srl a3, a0, 8 # a3 <- CC
+ EAS2(rOBJ, rFP, rOBJ) # s5 <- &fp[AA]
+ EAS2(a2, rFP, a2) # a2 <- &fp[BB]
+ EAS2(t1, rFP, a3) # t1 <- &fp[CC]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG0, rARG1, a2) # a0/a1 <- vBB/vBB+1
+ LOAD64(rARG2, rARG3, t1) # a2/a3 <- vCC/vCC+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, a2) # fa0/fa0f <- vBB/vBB+1
+ LOAD64_F(fa1, fa1f, t1) # fa1/fa1f <- vCC/vCC+1
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(fmod) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ JAL(fmod) # result in fv0/fv0f (see STORE64_F below)
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 14-17 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: mips/OP_ADD_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ addu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: mips/OP_SUB_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ subu a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: mips/OP_MUL_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ mul a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: mips/OP_DIV_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ div zero, a0, a1; mflo a0 # a0 <- op, a0-a3 changed (quotient from LO)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: mips/OP_REM_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 1
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ div zero, a0, a1; mfhi a0 # a0 <- op, a0-a3 changed (remainder from HI)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: mips/OP_AND_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ and a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: mips/OP_OR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ or a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: mips/OP_XOR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ xor a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: mips/OP_SHL_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ and a1, a1, 31 # optional op: mask shift distance to 0-31
+ sll a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: mips/OP_SHR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ and a1, a1, 31 # optional op
+ sra a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: mips/OP_USHR_INT_2ADDR.S */
+/* File: mips/binop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
+ * rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
+ * shl-int/2addr, shr-int/2addr, ushr-int/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ and a1, a1, 31 # optional op
+ srl a0, a0, a1 # a0 <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a0, rOBJ, t0) # vAA <- a0
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: mips/OP_ADD_LONG_2ADDR.S */
+/*
+ * See OP_ADD_LONG.S for details
+ */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ addu v0, a2, a0 # optional op
+ addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: mips/OP_SUB_LONG_2ADDR.S */
+/*
+ * See comments in OP_SUB_LONG.S
+ */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ subu v0, a0, a2 # optional op
+ subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: mips/OP_MUL_LONG_2ADDR.S */
+ /*
+ * See comments in OP_MUL_LONG.S
+ */
+ /* mul-long/2addr vA, vB */
+ GET_OPA4(t0) # t0 <- A+
+
+ EAS2(t0, rFP, t0) # t0 <- &fp[A]
+ LOAD64(a0, a1, t0) # vAA.low / high
+
+ GET_OPB(t1) # t1 <- B
+ EAS2(t1, rFP, t1) # t1 <- &fp[B]
+ LOAD64(a2, a3, t1) # vBB.low / high
+
+ mul v1, a3, a0 # v1= a3a0
+ multu a2, a0
+ mfhi t1
+ mflo v0 # v0= a2a0
+ mul t2, a2, a1 # t2= a2a1
+ addu v1, v1, t1 # v1= a3a0 + hi(a2a0)
+ addu v1, v1, t2 # v1= v1 + a2a1;
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ GET_INST_OPCODE(t1) # extract opcode from rINST
+ # vAA <- v0 (low)
+ STORE64(v0, v1, t0) # vAA+1 <- v1 (high)
+ GOTO_OPCODE(t1) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: mips/OP_DIV_LONG_2ADDR.S */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ JAL(__divdi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+#else
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a3, a2, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a1, a0, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 1
+ or t0, a3, a2 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ JAL(__divdi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v1, v0, rOBJ) # vAA/vAA+1 <- v1/v0
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: mips/OP_REM_LONG_2ADDR.S */
+#ifdef HAVE_LITTLE_ENDIAN
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 1
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ JAL(__moddi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v0, v1, rOBJ) # vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+#else
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a3, a2, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a1, a0, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 1
+ or t0, a3, a2 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ # optional op
+ JAL(__moddi3) # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(v1, v0, rOBJ) # vAA/vAA+1 <- v1/v0
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+#endif
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: mips/OP_AND_LONG_2ADDR.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ and a0, a0, a2 # optional op
+ and a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: mips/OP_OR_LONG_2ADDR.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ or a0, a0, a2 # optional op
+ or a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: mips/OP_XOR_LONG_2ADDR.S */
+/* File: mips/binopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be a MIPS instruction or a function call. (If the result
+ * comes back in a register other than a0, you can override "result".)
+ *
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
+ * and-long/2addr, or-long/2addr, xor-long/2addr
+ * rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+ LOAD64(a2, a3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, a2, a3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ xor a0, a0, a2 # optional op
+ xor a1, a1, a3 # result <- op, a0-a3 changed
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, rOBJ) # vAA/vAA+1 <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: mips/OP_SHL_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shl-long/2addr vA, vB */
+ GET_OPA4(t2) # t2 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(rOBJ, rFP, t2) # rOBJ <- &fp[A]
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ sll v0, a0, a2 # rlo<- alo << (shift&31)
+ not v1, a2 # rhi<- 31-shift (shift is 5b)
+ srl a0, 1
+ srl a0, v1 # alo<- alo >> (32-(shift&31))
+ sll v1, a1, a2 # rhi<- ahi << (shift&31)
+ or v1, a0 # rhi<- rhi | alo
+       andi a2, 0x20                          #  shift <- shift & 0x20
+ movn v1, v0, a2 # rhi<- rlo (if shift&0x20)
+ movn v0, zero, a2 # rlo<- 0 (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+       STORE64(v0, v1, rOBJ)                  #  vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: mips/OP_SHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* shr-long/2addr vA, vB */
+ GET_OPA4(t2) # t2 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t2, rFP, t2) # t2 <- &fp[A]
+ LOAD64(a0, a1, t2) # a0/a1 <- vAA/vAA+1
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+
+ sra v1, a1, a2 # rhi<- ahi >> (shift&31)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ sra a3, a1, 31 # a3<- sign(ah)
+ not a0, a2 # alo<- 31-shift (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, a3, a2 # rhi<- sign(ahi) (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+       STORE64(v0, v1, t2)                    #  vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: mips/OP_USHR_LONG_2ADDR.S */
+ /*
+ * Long integer shift, 2addr version. vA is 64-bit value/result, vB is
+ * 32-bit shift distance.
+ */
+ /* ushr-long/2addr vA, vB */
+ GET_OPA4(t3) # t3 <- A+
+ GET_OPB(a3) # a3 <- B
+ GET_VREG(a2, a3) # a2 <- vB
+ EAS2(t3, rFP, t3) # t3 <- &fp[A]
+ LOAD64(a0, a1, t3) # a0/a1 <- vAA/vAA+1
+
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ srl v1, a1, a2 # rhi<- ahi >> (shift&31)
+ srl v0, a0, a2 # rlo<- alo >> (shift&31)
+ not a0, a2 # alo<- 31-n (shift is 5b)
+ sll a1, 1
+ sll a1, a0 # ahi<- ahi << (32-(shift&31))
+ or v0, a1 # rlo<- rlo | ahi
+ andi a2, 0x20 # shift & 0x20
+ movn v0, v1, a2 # rlo<- rhi (if shift&0x20)
+ movn v1, zero, a2 # rhi<- 0 (if shift&0x20)
+
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+       STORE64(v0, v1, t3)                    #  vAA/vAA+1 <- v0/v1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: mips/OP_ADD_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" and
+ * "instr_f" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__addsf3) # result <- op, a0-a3 changed
+ SET_VREG(v0, rOBJ) # vAA <- result
+#else
+ add.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: mips/OP_SUB_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" and
+ * "instr_f" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__subsf3) # result <- op, a0-a3 changed
+ SET_VREG(v0, rOBJ) # vAA <- result
+#else
+ sub.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: mips/OP_MUL_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" and
+ * "instr_f" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__mulsf3) # result <- op, a0-a3 changed
+ SET_VREG(v0, rOBJ) # vAA <- result
+#else
+ mul.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: mips/OP_DIV_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" and
+ * "instr_f" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__divsf3) # result <- op, a0-a3 changed
+ SET_VREG(v0, rOBJ) # vAA <- result
+#else
+ div.s fv0, fa0, fa1
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: mips/OP_REM_FLOAT_2ADDR.S */
+/* File: mips/binflop2addr.S */
+ /*
+ * Generic 32-bit "/2addr" binary operation. Provide an "instr" and
+ * "instr_f" line
+ * that specifies an instruction that performs "result = a0 op a1".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
+ * div-float/2addr, rem-float/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # t1 <- A+
+ GET_OPB(a3) # a3 <- B
+#ifdef SOFT_FLOAT
+ GET_VREG(a0, rOBJ) # a0 <- vA
+ GET_VREG(a1, a3) # a1 <- vB
+ .if 0
+ # is second operand zero?
+ beqz a1, common_errDivideByZero
+ .endif
+#else
+ GET_VREG_F(fa0, rOBJ)
+ GET_VREG_F(fa1, a3)
+ .if 0
+ # is second operand zero?
+ li.s ft0, 0
+ c.eq.s fcc0, ft0, fa1
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(fmodf) # result <- op, a0-a3 changed
+ SET_VREG(v0, rOBJ) # vAA <- result
+#else
+ JAL(fmodf)
+ SET_VREG_F(fv0, rOBJ) # vAA <- result
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: mips/OP_ADD_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG2, rARG3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, rOBJ)
+ LOAD64_F(fa1, fa1f, a1)
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__adddf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ add.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: mips/OP_SUB_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG2, rARG3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, rOBJ)
+ LOAD64_F(fa1, fa1f, a1)
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__subdf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ sub.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: mips/OP_MUL_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG2, rARG3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, rOBJ)
+ LOAD64_F(fa1, fa1f, a1)
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__muldf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ mul.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: mips/OP_DIV_DOUBLE_2ADDR.S */
+/* File: mips/binflopWide2addr.S */
+ /*
+ * Generic 64-bit "/2addr" binary operation. Provide an "instr" line
+ * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+ * This could be an MIPS instruction or a function call.
+ * If "chkzero" is set to 1, we perform a divide-by-zero check on
+ * vCC (a1). Useful for integer division and modulus.
+ *
+ * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+ * div-double/2addr, rem-double/2addr
+ */
+ /* binop/2addr vA, vB */
+ GET_OPA4(rOBJ) # rOBJ <- A+
+ GET_OPB(a1) # a1 <- B
+ EAS2(a1, rFP, a1) # a1 <- &fp[B]
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+ LOAD64(rARG2, rARG3, a1) # a2/a3 <- vBB/vBB+1
+ LOAD64(rARG0, rARG1, rOBJ) # a0/a1 <- vAA/vAA+1
+ .if 0
+ or t0, rARG2, rARG3 # second arg (a2-a3) is zero?
+ beqz t0, common_errDivideByZero
+ .endif
+#else
+ LOAD64_F(fa0, fa0f, rOBJ)
+ LOAD64_F(fa1, fa1f, a1)
+ .if 0
+ li.d ft0, 0
+ c.eq.d fcc0, fa1, ft0
+ bc1t fcc0, common_errDivideByZero
+ .endif
+#endif
+1:
+ FETCH_ADVANCE_INST(1) # advance rPC, load rINST
+ # optional op
+#ifdef SOFT_FLOAT
+ JAL(__divdf3) # result <- op, a0-a3 changed
+ STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+ div.d fv0, fa0, fa1
+ STORE64_F(fv0, fv0f, rOBJ)
+#endif
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: mips/OP_REM_DOUBLE_2ADDR.S */
+/* rem-double/2addr: vA(wide) <- fmod(vA, vB).  Both float ABIs call out to
+ * libm fmod; the hard-float path relies on fa0/fa1 aliasing the o32 FP
+ * argument registers ($f12/$f14) -- NOTE(review): confirm against
+ * mips-defines.h that fa0=$f12, fa1=$f14 and fv0=$f0. */
+/* File: mips/binflopWide2addr.S */
+    /*
+     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
+     * This could be an MIPS instruction or a function call.
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
+     *      div-double/2addr, rem-double/2addr
+     */
+    /* binop/2addr vA, vB */
+    GET_OPA4(rOBJ)                         #  rOBJ <- A+
+    GET_OPB(a1)                            #  a1 <- B
+    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
+    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[A]
+#ifdef SOFT_FLOAT
+    LOAD64(rARG2, rARG3, a1)               #  a2/a3 <- vB/vB+1
+    LOAD64(rARG0, rARG1, rOBJ)             #  a0/a1 <- vA/vA+1
+    .if 0
+    or        t0, rARG2, rARG3             #  second arg (a2-a3) is zero?
+    beqz      t0, common_errDivideByZero
+    .endif
+#else
+    LOAD64_F(fa0, fa0f, rOBJ)
+    LOAD64_F(fa1, fa1f, a1)
+    .if 0
+    li.d      ft0, 0
+    c.eq.d    fcc0, fa1, ft0
+    bc1t      fcc0, common_errDivideByZero
+    .endif
+#endif
+1:
+    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
+                                  #  optional op
+#ifdef SOFT_FLOAT
+    JAL(fmod)                                 #  result <- op, a0-a3 changed
+    STORE64(rRESULT0, rRESULT1, rOBJ)
+#else
+    JAL(fmod)                              #  fv0/fv0f <- fmod(fa0, fa1)
+    STORE64_F(fv0, fv0f, rOBJ)
+#endif
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    GOTO_OPCODE(t0)                        #  jump to next instruction
+    /* 12-15 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: mips/OP_ADD_INT_LIT16.S */
+/* add-int/lit16: vA <- vB + ssssCCCC (sign-extended 16-bit literal) */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    addu a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RSUB_INT: /* 0xd1 */
+/* File: mips/OP_RSUB_INT.S */
+/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
+/* rsub-int: vA <- ssssCCCC - vB (note reversed operand order in the subu) */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    subu a0, a1, a0                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: mips/OP_MUL_INT_LIT16.S */
+/* mul-int/lit16: vA <- vB * ssssCCCC (low 32 bits of the product) */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    mul a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: mips/OP_DIV_INT_LIT16.S */
+/* div-int/lit16: vA <- vB / ssssCCCC; throws ArithmeticException when the
+ * literal is zero (chkzero enabled below via ".if 1"). */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    # NOTE(review): MIPS 'div' leaves LO UNPREDICTABLE for INT_MIN / -1,
+    # while Dalvik requires the result INT_MIN -- confirm this overflow case.
+    div zero, a0, a1; mflo a0                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: mips/OP_REM_INT_LIT16.S */
+/* rem-int/lit16: vA <- vB % ssssCCCC (remainder from HI); throws
+ * ArithmeticException when the literal is zero (".if 1" below). */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 1
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    # NOTE(review): MIPS 'div' leaves HI UNPREDICTABLE for INT_MIN / -1,
+    # while Dalvik requires remainder 0 -- confirm this overflow case.
+    div zero, a0, a1; mfhi a0                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: mips/OP_AND_INT_LIT16.S */
+/* and-int/lit16: vA <- vB & ssssCCCC */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: mips/OP_OR_INT_LIT16.S */
+/* or-int/lit16: vA <- vB | ssssCCCC */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    or a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: mips/OP_XOR_INT_LIT16.S */
+/* xor-int/lit16: vA <- vB ^ ssssCCCC */
+/* File: mips/binopLit16.S */
+    /*
+     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
+     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
+     */
+    # binop/lit16 vA, vB,                  /* +CCCC */
+    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
+    GET_OPB(a2)                            #  a2 <- B
+    GET_OPA(rOBJ)                          #  rOBJ <- A+
+    GET_VREG(a0, a2)                       #  a0 <- vB
+    and       rOBJ, rOBJ, 15               #  rOBJ <- A (keep low 4 bits only)
+    .if 0
+    # cmp a1, 0; is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    xor a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vA <- a0
+    /* 10-13 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: mips/OP_ADD_INT_LIT8.S */
+/* add-int/lit8: vAA <- vBB + ssssssCC (sign-extended 8-bit literal) */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    addu a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: mips/OP_RSUB_INT_LIT8.S */
+/* rsub-int/lit8: vAA <- ssssssCC - vBB (note reversed operands in the subu) */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    subu a0, a1, a0                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_MUL_INT_LIT8: /* 0xda */
+/* File: mips/OP_MUL_INT_LIT8.S */
+/* mul-int/lit8: vAA <- vBB * ssssssCC (low 32 bits of the product) */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    mul a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: mips/OP_DIV_INT_LIT8.S */
+/* div-int/lit8: vAA <- vBB / ssssssCC; throws ArithmeticException when the
+ * literal is zero (chkzero enabled below via ".if 1"). */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    # NOTE(review): MIPS 'div' leaves LO UNPREDICTABLE for INT_MIN / -1,
+    # while Dalvik requires the result INT_MIN -- confirm this overflow case.
+    div zero, a0, a1; mflo a0                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_REM_INT_LIT8: /* 0xdc */
+/* File: mips/OP_REM_INT_LIT8.S */
+/* rem-int/lit8: vAA <- vBB % ssssssCC (remainder from HI); throws
+ * ArithmeticException when the literal is zero (".if 1" below). */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 1
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    # NOTE(review): MIPS 'div' leaves HI UNPREDICTABLE for INT_MIN / -1,
+    # while Dalvik requires remainder 0 -- confirm this overflow case.
+    div zero, a0, a1; mfhi a0                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_AND_INT_LIT8: /* 0xdd */
+/* File: mips/OP_AND_INT_LIT8.S */
+/* and-int/lit8: vAA <- vBB & ssssssCC */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    and a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_OR_INT_LIT8: /* 0xde */
+/* File: mips/OP_OR_INT_LIT8.S */
+/* or-int/lit8: vAA <- vBB | ssssssCC */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    or a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: mips/OP_XOR_INT_LIT8.S */
+/* xor-int/lit8: vAA <- vBB ^ ssssssCC */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+                                  #  optional op
+    xor a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: mips/OP_SHL_INT_LIT8.S */
+/* shl-int/lit8: vAA <- vBB << (CC & 0x1f) */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op: Dalvik uses only the low 5 bits of the shift count
+    sll a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: mips/OP_SHR_INT_LIT8.S */
+/* shr-int/lit8: vAA <- vBB >> (CC & 0x1f), arithmetic (sign-propagating) */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op: Dalvik uses only the low 5 bits of the shift count
+    sra a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: mips/OP_USHR_INT_LIT8.S */
+/* ushr-int/lit8: vAA <- vBB >>> (CC & 0x1f), logical (zero-filling) */
+/* File: mips/binopLit8.S */
+    /*
+     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
+     * that specifies an instruction that performs "result = a0 op a1".
+     * This could be an MIPS instruction or a function call.  (If the result
+     * comes back in a register other than a0, you can override "result".)
+     *
+     * If "chkzero" is set to 1, we perform a divide-by-zero check on
+     * vCC (a1).  Useful for integer division and modulus.
+     *
+     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
+     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
+     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
+     */
+    # binop/lit8 vAA, vBB,                 /* +CC */
+    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
+    GET_OPA(rOBJ)                          #  rOBJ <- AA
+    and       a2, a3, 255                  #  a2 <- BB
+    GET_VREG(a0, a2)                       #  a0 <- vBB
+    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
+    .if 0
+    # is second operand zero?
+    beqz      a1, common_errDivideByZero
+    .endif
+    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
+
+    and a1, a1, 31                              #  optional op: Dalvik uses only the low 5 bits of the shift count
+    srl a0, a0, a1                              #  a0 <- op, a0-a3 changed
+    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
+    SET_VREG_GOTO(a0, rOBJ, t0)            #  vAA <- a0
+    /* 10-12 instructions */
+
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: mips/OP_IGET_VOLATILE.S */
+/* iget-volatile vA, vB, field@CCCC.  Shares the OP_IGET fast/resolve path;
+ * the actual field load continues at .LOP_IGET_VOLATILE_finish, outside this
+ * view -- NOTE(review): the volatile load/barrier must live there; confirm. */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_VOLATILE_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: mips/OP_IPUT_VOLATILE.S */
+/* iput-volatile vA, vB, field@CCCC.  Shares the OP_IPUT fast/resolve path;
+ * the actual field store continues at .LOP_IPUT_VOLATILE_finish, outside this
+ * view -- NOTE(review): the volatile store/barrier must live there; confirm. */
+/* File: mips/OP_IPUT.S */
+    /*
+     * General 32-bit instance field put.
+     *
+     * for: iput, iput-object, iput-boolean, iput-byte, iput-char, iput-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_VOLATILE_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: mips/OP_SGET_VOLATILE.S */
+/* sget-volatile vAA, field@BBBB.  Shares the OP_SGET fast/resolve path; the
+ * actual static load continues at .LOP_SGET_VOLATILE_finish, outside this
+ * view -- NOTE(review): the volatile load/barrier must live there; confirm. */
+/* File: mips/OP_SGET.S */
+    /*
+     * General 32-bit SGET handler.
+     *
+     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    # is resolved entry !null?
+    bnez      a0, .LOP_SGET_VOLATILE_finish
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    # success?
+    beqz      v0, common_exceptionThrown   #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SGET_VOLATILE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: mips/OP_SPUT_VOLATILE.S */
+/* sput-volatile vAA, field@BBBB.  Shares the OP_SPUT fast/resolve path; the
+ * actual static store continues at .LOP_SPUT_VOLATILE_finish, outside this
+ * view -- NOTE(review): the volatile store/barrier must live there; confirm. */
+/* File: mips/OP_SPUT.S */
+    /*
+     * General 32-bit SPUT handler.
+     *
+     * for: sput, sput-object, sput-boolean, sput-byte, sput-char, sput-short
+     */
+    # op vAA, field                        /* BBBB */
+    LOAD_rSELF_methodClassDex(a2)          #  a2 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref BBBB
+    LOAD_base_offDvmDex_pResFields(rBIX, a2) #  rBIX <- dvmDex->pResFields
+    LOAD_eas2(a0, rBIX, a1)                #  a0 <- resolved StaticField ptr
+    bnez      a0, .LOP_SPUT_VOLATILE_finish       #  is resolved entry null?
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rBIX: dvmDex->pResFields
+     */
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                   #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                            #  resolve() may throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)             #  v0 <- resolved StaticField ptr
+    move      a0, v0
+    beqz      v0, common_exceptionThrown   #  success? no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    b        .LOP_SPUT_VOLATILE_finish            # resume
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: mips/OP_IGET_OBJECT_VOLATILE.S */
+/* iget-object-volatile vA, vB, field@CCCC.  Shares the OP_IGET resolve path;
+ * the actual reference load continues at .LOP_IGET_OBJECT_VOLATILE_finish,
+ * outside this view -- NOTE(review): volatile load/barrier lives there; confirm. */
+/* File: mips/OP_IGET.S */
+    /*
+     * General 32-bit instance field get.
+     *
+     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
+     */
+    # op vA, vB, field                     /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pDvmDex->pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_OBJECT_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test results
+    move      a0, v0
+    bnez      v0, .LOP_IGET_OBJECT_VOLATILE_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: mips/OP_IGET_WIDE_VOLATILE.S */
+/* iget-wide-volatile vA, vB, field@CCCC.  Shares the OP_IGET_WIDE resolve
+ * path; the 64-bit load continues at .LOP_IGET_WIDE_VOLATILE_finish, outside
+ * this view -- NOTE(review): the atomic 64-bit read must live there; confirm. */
+/* File: mips/OP_IGET_WIDE.S */
+    /*
+     * Wide 32-bit instance field get.
+     */
+    # iget-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IGET_WIDE_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # test return code
+    move      a0, v0
+    bnez      v0, .LOP_IGET_WIDE_VOLATILE_finish
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+    .balign 128
+.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: mips/OP_IPUT_WIDE_VOLATILE.S */
+/* iput-wide-volatile vA, vB, field@CCCC.  Shares the OP_IPUT_WIDE resolve
+ * path; the 64-bit store continues at .LOP_IPUT_WIDE_VOLATILE_finish, outside
+ * this view -- NOTE(review): the atomic 64-bit write must live there; confirm. */
+/* File: mips/OP_IPUT_WIDE.S */
+    # iput-wide vA, vB, field              /* CCCC */
+    GET_OPB(a0)                            #  a0 <- B
+    LOAD_rSELF_methodClassDex(a3)          #  a3 <- DvmDex
+    FETCH(a1, 1)                           #  a1 <- field ref CCCC
+    LOAD_base_offDvmDex_pResFields(a2, a3) #  a2 <- pResFields
+    GET_VREG(rOBJ, a0)                     #  rOBJ <- fp[B], the object pointer
+    LOAD_eas2(a0, a2, a1)                  #  a0 <- resolved InstField ptr
+    # is resolved entry null?
+    bnez      a0, .LOP_IPUT_WIDE_VOLATILE_finish       #  no, already resolved
+    LOAD_rSELF_method(a2)                  #  a2 <- current method
+    EXPORT_PC()                            #  resolve() could throw
+    LOAD_base_offMethod_clazz(a0, a2)      #  a0 <- method->clazz
+    JAL(dvmResolveInstField)               #  v0 <- resolved InstField ptr
+    # success?
+    move      a0, v0
+    bnez      v0, .LOP_IPUT_WIDE_VOLATILE_finish       #  yes, finish up
+    b         common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: mips/OP_SGET_WIDE_VOLATILE.S */
+/* File: mips/OP_SGET_WIDE.S */
+ /*
+ * 64-bit SGET handler.
+ */
+ # sget-wide vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_SGET_WIDE_VOLATILE_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in v0.
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+
+ b .LOP_SGET_WIDE_VOLATILE_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: mips/OP_SPUT_WIDE_VOLATILE.S */
+/* File: mips/OP_SPUT_WIDE.S */
+ /*
+ * 64-bit SPUT handler.
+ */
+ # sput-wide vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ GET_OPA(t0) # t0 <- AA
+ LOAD_eas2(a2, rBIX, a1) # a2 <- resolved StaticField ptr
+ EAS2(rOBJ, rFP, t0) # rOBJ<- &fp[AA]
+ # is resolved entry null?
+ beqz a2, .LOP_SPUT_WIDE_VOLATILE_resolve # yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_finish: # field ptr in a2, AA in rOBJ
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vAA/vAA+1
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ .if 1
+ addu a2, offStaticField_value # a2<- pointer to data
+ JAL(dvmQuasiAtomicSwap64Sync) # stores a0/a1 into addr a2
+ .else
+ STORE64_off(a0, a1, a2, offStaticField_value) # field <- vAA/vAA+1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_BREAKPOINT: /* 0xec */
+ /* (stub) */
+ SAVE_PC_TO_SELF() # only need to export PC and FP
+ SAVE_FP_TO_SELF()
+ move a0, rSELF # self is first arg to function
+ JAL(dvmMterp_OP_BREAKPOINT) # call
+ LOAD_PC_FROM_SELF() # retrieve updated values
+ LOAD_FP_FROM_SELF()
+ FETCH_INST() # load next instruction from rPC
+ GET_INST_OPCODE(t0) # ...trim down to just the opcode
+ GOTO_OPCODE(t0) # ...and jump to the handler
+/* ------------------------------ */
+ .balign 128
+.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: mips/OP_THROW_VERIFICATION_ERROR.S */
+ /*
+ * Handle a throw-verification-error instruction. This throws an
+ * exception for an error discovered during verification. The
+ * exception is indicated by AA, with some detail provided by BBBB.
+ */
+ /* op AA, ref@BBBB */
+
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ FETCH(a2, 1) # a2 <- BBBB
+ EXPORT_PC() # export the PC
+ GET_OPA(a1) # a1 <- AA
+ JAL(dvmThrowVerificationError) # always throws
+ b common_exceptionThrown # handle exception
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_EXECUTE_INLINE: /* 0xee */
+/* File: mips/OP_EXECUTE_INLINE.S */
+ /*
+ * Execute a "native inline" instruction.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in a0-a3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ *
+ * TUNING: could maintain two tables, pointer in Thread and
+	 * swap if profiler/debugger active.
+ */
+ /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
+ lhu a2, offThread_subMode(rSELF)
+ FETCH(rBIX, 1) # rBIX <- BBBB
+ EXPORT_PC() # can throw
+ and a2, kSubModeDebugProfile # Any going on?
+ bnez a2, .LOP_EXECUTE_INLINE_debugmode # yes - take slow path
+.LOP_EXECUTE_INLINE_resume:
+ addu a1, rSELF, offThread_retval # a1 <- &self->retval
+ GET_OPB(a0) # a0 <- B
+ # Stack should have 16/20 available
+ sw a1, STACK_OFFSET_ARG04(sp) # push &self->retval
+ BAL(.LOP_EXECUTE_INLINE_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ # test boolean result of inline
+ beqz v0, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: mips/OP_EXECUTE_INLINE_RANGE.S */
+ /*
+ * Execute a "native inline" instruction, using "/range" semantics.
+ * Same idea as execute-inline, but we get the args differently.
+ *
+ * We need to call an InlineOp4Func:
+ * bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
+ *
+ * The first four args are in a0-a3, pointer to return value storage
+ * is on the stack. The function's return value is a flag that tells
+ * us if an exception was thrown.
+ */
+ /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
+ lhu a2, offThread_subMode(rSELF)
+ FETCH(rBIX, 1) # rBIX<- BBBB
+ EXPORT_PC() # can throw
+ and a2, kSubModeDebugProfile # Any going on?
+ bnez a2, .LOP_EXECUTE_INLINE_RANGE_debugmode # yes - take slow path
+.LOP_EXECUTE_INLINE_RANGE_resume:
+ addu a1, rSELF, offThread_retval # a1<- &self->retval
+ GET_OPA(a0)
+ sw a1, STACK_OFFSET_ARG04(sp) # push &self->retval
+ BAL(.LOP_EXECUTE_INLINE_RANGE_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ beqz v0, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: mips/OP_INVOKE_OBJECT_INIT_RANGE.S */
+ /*
+ * Invoke Object.<init> on an object. In practice we know that
+ * Object's nullary constructor doesn't do anything, so we just
+ * skip it unless a debugger is active.
+ */
+ FETCH(a1, 2) # a1<- CCCC
+ GET_VREG(a0, a1) # a0<- "this" ptr
+ # check for NULL
+ beqz a0, common_errNullObject # export PC and throw NPE
+ LOAD_base_offObject_clazz(a1, a0) # a1<- obj->clazz
+ LOAD_base_offClassObject_accessFlags(a2, a1) # a2<- clazz->accessFlags
+ and a2, CLASS_ISFINALIZABLE # is this class finalizable?
+ beqz a2, .LOP_INVOKE_OBJECT_INIT_RANGE_finish # no, go
+
+.LOP_INVOKE_OBJECT_INIT_RANGE_setFinal:
+ EXPORT_PC() # can throw
+ JAL(dvmSetFinalizable) # call dvmSetFinalizable(obj)
+ LOAD_offThread_exception(a0, rSELF) # a0<- self->exception
+ # exception pending?
+ bnez a0, common_exceptionThrown # yes, handle it
+
+.LOP_INVOKE_OBJECT_INIT_RANGE_finish:
+ lhu a1, offThread_subMode(rSELF)
+ and a1, kSubModeDebuggerActive # debugger active?
+ bnez a1, .LOP_INVOKE_OBJECT_INIT_RANGE_debugger # Yes - skip optimization
+ FETCH_ADVANCE_INST(2+1) # advance to next instr, load rINST
+ GET_INST_OPCODE(t0) # t0<- opcode from rINST
+ GOTO_OPCODE(t0) # execute it
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: mips/OP_RETURN_VOID_BARRIER.S */
+ SMP_DMB
+ b common_returnFromMethod
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_QUICK: /* 0xf2 */
+/* File: mips/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 #
+ lw a0, 0(t0) # a0 <- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(a0, a2) # fp[A] <- a0
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: mips/OP_IGET_WIDE_QUICK.S */
+ # iget-wide-quick vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 # t0 <- a3 + a1
+ LOAD64(a0, a1, t0) # a0 <- obj.field (64 bits, aligned)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a2)
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: mips/OP_IGET_OBJECT_QUICK.S */
+/* File: mips/OP_IGET_QUICK.S */
+ /* For: iget-quick, iget-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- object we're operating on
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ # check object for null
+ beqz a3, common_errNullObject # object was null
+ addu t0, a3, a1 #
+ lw a0, 0(t0) # a0 <- obj.field (always 32 bits)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(a0, a2) # fp[A] <- a0
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_QUICK: /* 0xf5 */
+/* File: mips/OP_IPUT_QUICK.S */
+ /* For: iput-quick, iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sw a0, 0(t0) # obj.field (always 32 bits) <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: mips/OP_IPUT_WIDE_QUICK.S */
+ # iput-wide-quick vA, vB, offset /* CCCC */
+ GET_OPA4(a0) # a0 <- A(+)
+ GET_OPB(a1) # a1 <- B
+ GET_VREG(a2, a1) # a2 <- fp[B], the object pointer
+ EAS2(a3, rFP, a0) # a3 <- &fp[A]
+ LOAD64(a0, a1, a3) # a0/a1 <- fp[A]
+ # check object for null
+ beqz a2, common_errNullObject # object was null
+ FETCH(a3, 1) # a3 <- field byte offset
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+	addu      a2, a2, a3                   #  a2 <- address of obj.field
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0/a1
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: mips/OP_IPUT_OBJECT_QUICK.S */
+ /* For: iput-object-quick */
+ # op vA, vB, offset /* CCCC */
+ GET_OPB(a2) # a2 <- B
+ GET_VREG(a3, a2) # a3 <- fp[B], the object pointer
+ FETCH(a1, 1) # a1 <- field byte offset
+ GET_OPA4(a2) # a2 <- A(+)
+ beqz a3, common_errNullObject # object was null
+ GET_VREG(a0, a2) # a0 <- fp[A]
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ addu t0, a3, a1
+ sw a0, 0(t0) # obj.field (always 32 bits) <- a0
+ beqz a0, 1f
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ srl t1, a3, GC_CARD_SHIFT
+ addu t2, a2, t1
+ sb a2, 0(t2)
+1:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: mips/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(a3, 2) # a3 <- FEDC or CCCC
+ FETCH(a1, 1) # a1 <- BBBB
+ .if (!0)
+ and a3, a3, 15 # a3 <- C (or stays CCCC)
+ .endif
+ GET_VREG(rOBJ, a3) # rOBJ <- vC ("this" ptr)
+ # is "this" null?
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ LOAD_base_offObject_clazz(a2, rOBJ) # a2 <- thisPtr->clazz
+ LOAD_base_offClassObject_vtable(a2, a2) # a2 <- thisPtr->clazz->vtable
+ EXPORT_PC() # invoke must export
+ LOAD_eas2(a0, a2, a1) # a0 <- vtable[BBBB]
+	b         common_invokeMethodNoRange #  (a0=method, rOBJ="this")
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: mips/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
+/* File: mips/OP_INVOKE_VIRTUAL_QUICK.S */
+ /*
+ * Handle an optimized virtual method call.
+ *
+ * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(a3, 2) # a3 <- FEDC or CCCC
+ FETCH(a1, 1) # a1 <- BBBB
+ .if (!1)
+ and a3, a3, 15 # a3 <- C (or stays CCCC)
+ .endif
+ GET_VREG(rOBJ, a3) # rOBJ <- vC ("this" ptr)
+ # is "this" null?
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ LOAD_base_offObject_clazz(a2, rOBJ) # a2 <- thisPtr->clazz
+ LOAD_base_offClassObject_vtable(a2, a2) # a2 <- thisPtr->clazz->vtable
+ EXPORT_PC() # invoke must export
+ LOAD_eas2(a0, a2, a1) # a0 <- vtable[BBBB]
+	b         common_invokeMethodRange #  (a0=method, rOBJ="this")
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: mips/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(t0, 2) # t0 <- GFED or CCCC
+ LOAD_rSELF_method(a2) # a2 <- current method
+ .if (!0)
+ and t0, t0, 15 # t0 <- D (or stays CCCC)
+ .endif
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offMethod_clazz(a2, a2) # a2 <- method->clazz
+ EXPORT_PC() # must export for invoke
+ LOAD_base_offClassObject_super(a2, a2) # a2 <- method->clazz->super
+ GET_VREG(rOBJ, t0) # rOBJ <- "this"
+ LOAD_base_offClassObject_vtable(a2, a2) # a2 <- ...clazz->super->vtable
+ # is "this" null ?
+ LOAD_eas2(a0, a2, a1) # a0 <- super->vtable[BBBB]
+ beqz rOBJ, common_errNullObject # "this" is null, throw exception
+ b common_invokeMethodNoRange # (a0=method, rOBJ="this")
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: mips/OP_INVOKE_SUPER_QUICK_RANGE.S */
+/* File: mips/OP_INVOKE_SUPER_QUICK.S */
+ /*
+ * Handle an optimized "super" method call.
+ *
+ * for: [opt] invoke-super-quick, invoke-super-quick/range
+ */
+ # op vB, {vD, vE, vF, vG, vA}, class /* CCCC */
+ # op vAA, {vCCCC..v(CCCC+AA-1)}, meth /* BBBB */
+ FETCH(t0, 2) # t0 <- GFED or CCCC
+ LOAD_rSELF_method(a2) # a2 <- current method
+ .if (!1)
+ and t0, t0, 15 # t0 <- D (or stays CCCC)
+ .endif
+ FETCH(a1, 1) # a1 <- BBBB
+ LOAD_base_offMethod_clazz(a2, a2) # a2 <- method->clazz
+ EXPORT_PC() # must export for invoke
+ LOAD_base_offClassObject_super(a2, a2) # a2 <- method->clazz->super
+ GET_VREG(rOBJ, t0) # rOBJ <- "this"
+ LOAD_base_offClassObject_vtable(a2, a2) # a2 <- ...clazz->super->vtable
+ # is "this" null ?
+ LOAD_eas2(a0, a2, a1) # a0 <- super->vtable[BBBB]
+ beqz rOBJ, common_errNullObject # "this" is null, throw exception
+ b common_invokeMethodRange # (a0=method, rOBJ="this")
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: mips/OP_IPUT_OBJECT_VOLATILE.S */
+/* File: mips/OP_IPUT_OBJECT.S */
+ /*
+ * 32-bit instance field put.
+ *
+ * for: iput-object, iput-object-volatile
+ */
+ # op vA, vB, field /* CCCC */
+ GET_OPB(a0) # a0 <- B
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref CCCC
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_OBJECT_VOLATILE_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ # success?
+ move a0, v0
+ bnez v0, .LOP_IPUT_OBJECT_VOLATILE_finish # yes, finish up
+ b common_exceptionThrown
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: mips/OP_SGET_OBJECT_VOLATILE.S */
+/* File: mips/OP_SGET.S */
+ /*
+ * General 32-bit SGET handler.
+ *
+ * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
+ */
+ # op vAA, field /* BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_OBJECT_VOLATILE_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_OBJECT_VOLATILE_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: mips/OP_SPUT_OBJECT_VOLATILE.S */
+/* File: mips/OP_SPUT_OBJECT.S */
+ /*
+ * General 32-bit SPUT handler.
+ *
+ * for: sput-object, sput-object-volatile
+ */
+ /* op vAA, field@BBBB */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1 <- field ref BBBB
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_OBJECT_VOLATILE_finish # is resolved entry null?
+
+ /* Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_OBJECT_VOLATILE_finish # resume
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_DISPATCH_FF: /* 0xff */
+ /* (stub) */
+ SAVE_PC_TO_SELF() # only need to export PC and FP
+ SAVE_FP_TO_SELF()
+ move a0, rSELF # self is first arg to function
+ JAL(dvmMterp_OP_DISPATCH_FF) # call
+ LOAD_PC_FROM_SELF() # retrieve updated values
+ LOAD_FP_FROM_SELF()
+ FETCH_INST() # load next instruction from rPC
+ GET_INST_OPCODE(t0) # ...trim down to just the opcode
+ GOTO_OPCODE(t0) # ...and jump to the handler
+/* ------------------------------ */
+ .balign 128
+.L_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: mips/OP_CONST_CLASS_JUMBO.S */
+ /* const-class/jumbo vBBBB, Class@AAAAAAAA */
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- self->methodClassDex
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- dvmDex->pResClasses
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ FETCH(rOBJ, 3) # rOBJ<- BBBB
+ LOAD_eas2(v0, a2, a1) # v0 <- pResClasses[BBBB]
+
+ bnez v0, .LOP_CONST_CLASS_JUMBO_resolve # v0!=0 => resolved-ok
+ /*
+ * Continuation if the Class has not yet been resolved.
+ * a1: AAAAAAAA (Class ref)
+ * rOBJ: target register
+ */
+ EXPORT_PC()
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ li a2, 1 # a2 <- true
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- Class reference
+ # failed==0?
+ beqz v0, common_exceptionThrown # yup, handle the exception
+
+.LOP_CONST_CLASS_JUMBO_resolve:
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(v0, rOBJ, t0) # vBBBB <- v0
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: mips/OP_CHECK_CAST_JUMBO.S */
+ /*
+ * Check to see if a cast from one class to another is allowed.
+ */
+	/* check-cast/jumbo vBBBB, class@AAAAAAAA */
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a3, 3) # a3<- BBBB
+ sll a2,a2,16
+ or a2, a0, a2 # a2<- AAAAaaaa
+
+ GET_VREG(rOBJ, a3) # rOBJ<- object
+ LOAD_rSELF_methodClassDex(a0) # a0<- pDvmDex
+ LOAD_base_offDvmDex_pResClasses(a0, a0) # a0<- pDvmDex->pResClasses
+ # is object null?
+ beqz rOBJ, .LOP_CHECK_CAST_JUMBO_okay # null obj, cast always succeeds
+ LOAD_eas2(a1, a0, a2) # a1<- resolved class
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0<- obj->clazz
+ # have we resolved this before?
+ beqz a1, .LOP_CHECK_CAST_JUMBO_resolve # not resolved, do it now
+.LOP_CHECK_CAST_JUMBO_resolved:
+ # same class (trivial success)?
+ bne a0, a1, .LOP_CHECK_CAST_JUMBO_fullcheck # no, do full check
+ b .LOP_CHECK_CAST_JUMBO_okay # yes, finish up
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from BBBB
+ * rOBJ holds object
+ */
+.LOP_CHECK_CAST_JUMBO_fullcheck:
+ move rBIX,a1 # avoid ClassObject getting clobbered
+ JAL(dvmInstanceofNonTrivial) # v0<- boolean result
+ # failed?
+ bnez v0, .LOP_CHECK_CAST_JUMBO_okay # no, success
+ b .LOP_CHECK_CAST_JUMBO_castfailure
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: mips/OP_INSTANCE_OF_JUMBO.S */
+ /*
+ * Check to see if an object reference is an instance of a class.
+ *
+ * Most common situation is a non-null object, being compared against
+ * an already-resolved class.
+ *
+ * TODO: convert most of this into a common subroutine, shared with
+ * OP_INSTANCE_OF.S.
+ */
+ /* instance-of/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+ FETCH(a3, 4) # a3<- vCCCC
+ FETCH(rOBJ, 3) # rOBJ<- vBBBB
+ GET_VREG(a0, a3) # a0 <- vCCCC (object)
+ LOAD_rSELF_methodClassDex(a2) # a2 <- pDvmDex
+ # is object null?
+ beqz a0, .LOP_INSTANCE_OF_JUMBO_store # null obj, not an instance, store a0
+	FETCH(a1, 1)                           #  a1<- aaaa (lo)
+	FETCH(a3, 2)                           #  a3<- AAAA (hi)
+ LOAD_base_offDvmDex_pResClasses(a2, a2) # a2 <- pDvmDex->pResClasses
+ sll a3,a3,16
+ or a3, a1, a3 # a3<- AAAAaaaa
+
+ LOAD_eas2(a1, a2, a3) # a1 <- resolved class
+ LOAD_base_offObject_clazz(a0, a0) # a0 <- obj->clazz
+ # have we resolved this before?
+ beqz a1, .LOP_INSTANCE_OF_JUMBO_resolve # not resolved, do it now
+ b .LOP_INSTANCE_OF_JUMBO_resolved # resolved, continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: mips/OP_NEW_INSTANCE_JUMBO.S */
+ /*
+ * Create a new instance of a class.
+ */
+ /* new-instance/jumbo vBBBB, class@AAAAAAAA */
+	FETCH(a0, 1)                           #  a0<- aaaa (lo)
+	FETCH(a1, 2)                           #  a1<- AAAA (hi)
+	LOAD_base_offDvmDex_pResClasses(a3, a3) #  a3 <- pDvmDex->pResClasses  NOTE(review): a3 is not loaded with DvmDex beforehand (siblings do LOAD_rSELF_methodClassDex(a3) first) — verify
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX <- &resolved_class
+#endif
+ EXPORT_PC() # req'd for init, resolve, alloc
+ # already resolved?
+ beqz a0, .LOP_NEW_INSTANCE_JUMBO_resolve # no, resolve it now
+.LOP_NEW_INSTANCE_JUMBO_resolved: # a0=class
+ lbu a1, offClassObject_status(a0) # a1 <- ClassStatus enum
+ # has class been initialized?
+ li t0, CLASS_INITIALIZED
+ move rOBJ, a0 # save a0
+ bne a1, t0, .LOP_NEW_INSTANCE_JUMBO_needinit # no, init class now
+
+.LOP_NEW_INSTANCE_JUMBO_initialized: # a0=class
+ LOAD_base_offClassObject_accessFlags(a3, a0) # a3 <- clazz->accessFlags
+ li a1, ALLOC_DONT_TRACK # flags for alloc call
+ # a0=class
+ JAL(dvmAllocObject) # v0 <- new object
+ FETCH(a3, 3) # a3<- BBBB
+#if defined(WITH_JIT)
+ /*
+ * The JIT needs the class to be fully resolved before it can
+ * include this instruction in a trace.
+ */
+ lhu a1, offThread_subMode(rSELF)
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ and a1, kSubModeJitTraceBuild # under construction?
+ bnez a1, .LOP_NEW_INSTANCE_JUMBO_jitCheck
+#else
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+#endif
+ b .LOP_NEW_INSTANCE_JUMBO_continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: mips/OP_NEW_ARRAY_JUMBO.S */
+ /*
+ * Allocate an array of objects, specified with the array class
+ * and a count.
+ *
+ * The verifier guarantees that this is an array class, so we don't
+ * check for it here.
+ */
+ /* new-array/jumbo vBBBB, vCCCC, class@AAAAAAAA */
+ FETCH(a2, 1) # a2<- aaaa (lo)
+ FETCH(a3, 2) # a3<- AAAA (hi)
+ FETCH(a0, 4) # a0<- vCCCC
+ sll a3,a3,16 #
+ or a2, a2, a3 # a2<- AAAAaaaa
+
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ GET_VREG(a1, a0) # a1 <- vCCCC (array length)
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ LOAD_eas2(a0, a3, a2) # a0 <- resolved class
+ # check length
+ bltz a1, common_errNegativeArraySize # negative length, bail - len in a1
+ EXPORT_PC() # req'd for resolve, alloc
+ # already resolved?
+ beqz a0, .LOP_NEW_ARRAY_JUMBO_resolve # not resolved,
+ b .LOP_NEW_ARRAY_JUMBO_finish
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: mips/OP_FILLED_NEW_ARRAY_JUMBO.S */
+ /*
+ * Create a new array with elements filled from registers.
+ *
+ * TODO: convert most of this into a common subroutine, shared with
+ * OP_FILLED_NEW_ARRAY.S.
+ */
+ /* filled-new-array/jumbo {vCCCC..v(CCCC+BBBB-1)}, type@AAAAAAAA */
+
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+	FETCH(a0, 1)                           #  a0<- aaaa (lo)
+	FETCH(a1, 2)                           #  a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResClasses(a3, a3) # a3 <- pDvmDex->pResClasses
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved class
+ GET_OPA(rOBJ) # rOBJ <- AA or BA
+ EXPORT_PC() # need for resolve and alloc
+ # already resolved?
+ bnez a0, .LOP_FILLED_NEW_ARRAY_JUMBO_continue # yes, continue on
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .LOP_FILLED_NEW_ARRAY_JUMBO_continue
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_JUMBO: /* 0x106 */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_JUMBO_resolved # resolved, continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: mips/OP_IGET_WIDE_JUMBO.S */
+ /*
+ * Jumbo 64-bit instance field get.
+ */
+ /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_WIDE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_WIDE_JUMBO_resolved # resolved, continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: mips/OP_IGET_OBJECT_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_OBJECT_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_OBJECT_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: mips/OP_IGET_BOOLEAN_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_BOOLEAN_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_BOOLEAN_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: mips/OP_IGET_BYTE_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_BYTE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_BYTE_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: mips/OP_IGET_CHAR_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_CHAR_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_CHAR_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: mips/OP_IGET_SHORT_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_SHORT_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_SHORT_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_JUMBO: /* 0x10d */
+/* File: mips/OP_IPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ *
+ * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+ * iput-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_JUMBO_resolved # resolved, continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: mips/OP_IPUT_WIDE_JUMBO.S */
+ /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_WIDE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_WIDE_JUMBO_resolved # resolved, continue
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: mips/OP_IPUT_OBJECT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ */
+ /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000; FIX: shift the HI half (was "sll a1,a1,16",
+ # which produced aaaa0000|AAAA and a bogus field index)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_OBJECT_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_OBJECT_JUMBO_resolved
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: mips/OP_IPUT_BOOLEAN_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ *
+ * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+ * iput-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_BOOLEAN_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_BOOLEAN_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: mips/OP_IPUT_BYTE_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ *
+ * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+ * iput-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_BYTE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_BYTE_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: mips/OP_IPUT_CHAR_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ *
+ * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+ * iput-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_CHAR_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_CHAR_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: mips/OP_IPUT_SHORT_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ *
+ * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+ * iput-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_SHORT_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_SHORT_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_JUMBO: /* 0x114 */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_JUMBO_finish # resume
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: mips/OP_SGET_WIDE_JUMBO.S */
+ /*
+ * Jumbo 64-bit SGET handler.
+ */
+ /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(a2, a2) # a2 <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_WIDE_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ *
+ * Returns StaticField pointer in v0.
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+ b .LOP_SGET_WIDE_JUMBO_finish # resume
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: mips/OP_SGET_OBJECT_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_OBJECT_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_OBJECT_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: mips/OP_SGET_BOOLEAN_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_BOOLEAN_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_BOOLEAN_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: mips/OP_SGET_BYTE_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_BYTE_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_BYTE_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: mips/OP_SGET_CHAR_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_CHAR_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_CHAR_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: mips/OP_SGET_SHORT_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_SHORT_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_SHORT_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_JUMBO: /* 0x11b */
+/* File: mips/OP_SPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler.
+ *
+ * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+ * sput-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_JUMBO_finish # branch if already resolved
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_JUMBO_finish # resume
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: mips/OP_SPUT_WIDE_JUMBO.S */
+ /*
+ * Jumbo 64-bit SPUT handler.
+ */
+ /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex (FIX: was a2, clobbered by FETCH below)
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a3) # rBIX <- dvmDex->pResFields
+ sll a2,a2,16 # a2<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a1, a2 # a1<- AAAAaaaa (32-bit field ref)
+ FETCH(rOBJ, 3) # rOBJ<- BBBB (source vreg pair index)
+ EAS2(rOBJ, rFP, rOBJ) # rOBJ<- &fp[BBBB] (FIX: was t0, never set here)
+ LOAD_eas2(a2, rBIX, a1) # a2 <- resolved StaticField ptr (FIX: load was
+ # missing; finish below requires field ptr in a2)
+ # is resolved entry null?
+ beqz a2, .LOP_SPUT_WIDE_JUMBO_resolve # yes, do resolve (a1=field ref, rBIX=pResFields)
+.LOP_SPUT_WIDE_JUMBO_finish: # field ptr in a2, BBBB in rOBJ
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vBBBB/vBBBB+1
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ .if 0
+ addu a2, offStaticField_value # a2<- pointer to data
+ JAL(dvmQuasiAtomicSwap64Sync) # stores a0/a1 into addr a2
+ .else
+ STORE64_off(a0, a1, a2, offStaticField_value) # field <- vBBBB/vBBBB+1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: mips/OP_SPUT_OBJECT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler for objects
+ */
+ /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1,a0,a1 # a1<- AAAAaaaa (32-bit field ref)
+
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_OBJECT_JUMBO_finish # branch if already resolved
+
+ /* Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_OBJECT_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: mips/OP_SPUT_BOOLEAN_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler.
+ *
+ * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+ * sput-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_BOOLEAN_JUMBO_finish # branch if already resolved
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_BOOLEAN_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: mips/OP_SPUT_BYTE_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler.
+ *
+ * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+ * sput-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_BYTE_JUMBO_finish # branch if already resolved
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_BYTE_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: mips/OP_SPUT_CHAR_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler.
+ *
+ * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+ * sput-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_CHAR_JUMBO_finish # branch if already resolved
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_CHAR_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: mips/OP_SPUT_SHORT_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler.
+ *
+ * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+ * sput-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit field ref)
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_SHORT_JUMBO_finish # branch if already resolved
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0 # a0<- v0 (resolved ptr)
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_SHORT_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: mips/OP_INVOKE_VIRTUAL_JUMBO.S */
+ /*
+ * Handle a virtual method call.
+ */
+ /* invoke-virtual/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit method ref)
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, .LOP_INVOKE_VIRTUAL_JUMBO_continue # yes, continue on
+
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0<- v0 (resolved baseMethod)
+ # got null?
+ bnez v0, .LOP_INVOKE_VIRTUAL_JUMBO_continue # no, continue
+ b common_exceptionThrown # yes, handle exception
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: mips/OP_INVOKE_SUPER_JUMBO.S */
+ /*
+ * Handle a "super" method call.
+ */
+ /* invoke-super/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ FETCH(t0, 4) # t0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16 # a1<- AAAA0000 (hi half into upper 16 bits)
+ or a1, a0, a1 # a1<- AAAAaaaa (32-bit method ref)
+ GET_VREG(rOBJ, t0) # rOBJ <- "this" ptr
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved baseMethod
+ # null "this"?
+ LOAD_rSELF_method(t1) # t1 <- current method
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ # cmp a0, 0; already resolved?
+ LOAD_base_offMethod_clazz(rBIX, t1) # rBIX <- method->clazz
+ EXPORT_PC() # must export for invoke
+ bnez a0, .LOP_INVOKE_SUPER_JUMBO_continue # resolved, continue on
+
+ move a0, rBIX # a0 <- method->clazz
+ li a2, METHOD_VIRTUAL # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0 # a0<- v0 (resolved baseMethod)
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b .LOP_INVOKE_SUPER_JUMBO_continue
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: mips/OP_INVOKE_DIRECT_JUMBO.S */
+ /*
+ * Handle a direct method call.
+ *
+ * (We could defer the "is 'this' pointer null" test to the common
+ * method invocation code, and use a flag to indicate that static
+ * calls don't count. If we do this as part of copying the arguments
+ * out we could avoiding loading the first arg twice.)
+ *
+ */
+ /* invoke-direct/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ FETCH(rBIX, 4) # rBIX <- GFED or CCCC
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+ .if (!0)
+ and rBIX, rBIX, 15 # rBIX <- D (or stays CCCC)
+ .endif
+ EXPORT_PC() # must export for invoke
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ # already resolved?
+ bnez a0, 1f # resolved, call the function
+
+ lw a3, offThread_method(rSELF) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_DIRECT # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+
+1:
+ bnez rOBJ, common_invokeMethodJumbo # a0=method, rOBJ="this"
+ b common_errNullObject # yes, throw exception
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: mips/OP_INVOKE_STATIC_JUMBO.S */
+ /*
+ * Handle a static method call.
+ */
+ /* invoke-static/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a3) # a3 <- pDvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResMethods(a3, a3) # a3 <- pDvmDex->pResMethods
+ sll a1,a1,16
+ or a1, a0, a1 # r1<- AAAAaaaa
+ li rOBJ, 0 # null "this" in delay slot
+ LOAD_eas2(a0, a3, a1) # a0 <- resolved methodToCall
+#if defined(WITH_JIT)
+ EAS2(rBIX, a3, a1) # rBIX<- &resolved_metherToCall
+#endif
+ EXPORT_PC() # must export for invoke
+ # already resolved?
+ bnez a0, common_invokeMethodJumboNoThis # (a0 = method)
+ b .LOP_INVOKE_STATIC_JUMBO_resolve
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: mips/OP_INVOKE_INTERFACE_JUMBO.S */
+ /*
+ * Handle an interface method call.
+ */
+ /* invoke-interface/jumbo {vCCCC..v(CCCC+BBBB-1)}, meth@AAAAAAAA */
+ FETCH(a2, 4) # a2<- CCCC
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ EXPORT_PC() # must export for invoke
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ GET_VREG(rOBJ, a2) # rOBJ <- first arg ("this")
+ LOAD_rSELF_methodClassDex(a3) # a3 <- methodClassDex
+ LOAD_rSELF_method(a2) # a2 <- method
+ # null obj?
+ beqz rOBJ, common_errNullObject # yes, fail
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0 <- thisPtr->clazz
+ JAL(dvmFindInterfaceMethodInCache) # v0 <- call(class, ref, method, dex)
+ move a0, v0
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ b common_invokeMethodJumbo # (a0=method, rOBJ="this")
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_27FF: /* 0x127 */
+/* File: mips/OP_UNUSED_27FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_28FF: /* 0x128 */
+/* File: mips/OP_UNUSED_28FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_29FF: /* 0x129 */
+/* File: mips/OP_UNUSED_29FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_2AFF: /* 0x12a */
+/* File: mips/OP_UNUSED_2AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_2BFF: /* 0x12b */
+/* File: mips/OP_UNUSED_2BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_2CFF: /* 0x12c */
+/* File: mips/OP_UNUSED_2CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_2DFF: /* 0x12d */
+/* File: mips/OP_UNUSED_2DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_2EFF: /* 0x12e */
+/* File: mips/OP_UNUSED_2EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_2FFF: /* 0x12f */
+/* File: mips/OP_UNUSED_2FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_30FF: /* 0x130 */
+/* File: mips/OP_UNUSED_30FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_31FF: /* 0x131 */
+/* File: mips/OP_UNUSED_31FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_32FF: /* 0x132 */
+/* File: mips/OP_UNUSED_32FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_33FF: /* 0x133 */
+/* File: mips/OP_UNUSED_33FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_34FF: /* 0x134 */
+/* File: mips/OP_UNUSED_34FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_35FF: /* 0x135 */
+/* File: mips/OP_UNUSED_35FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_36FF: /* 0x136 */
+/* File: mips/OP_UNUSED_36FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_37FF: /* 0x137 */
+/* File: mips/OP_UNUSED_37FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_38FF: /* 0x138 */
+/* File: mips/OP_UNUSED_38FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_39FF: /* 0x139 */
+/* File: mips/OP_UNUSED_39FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3AFF: /* 0x13a */
+/* File: mips/OP_UNUSED_3AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3BFF: /* 0x13b */
+/* File: mips/OP_UNUSED_3BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3CFF: /* 0x13c */
+/* File: mips/OP_UNUSED_3CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3DFF: /* 0x13d */
+/* File: mips/OP_UNUSED_3DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3EFF: /* 0x13e */
+/* File: mips/OP_UNUSED_3EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_3FFF: /* 0x13f */
+/* File: mips/OP_UNUSED_3FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_40FF: /* 0x140 */
+/* File: mips/OP_UNUSED_40FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_41FF: /* 0x141 */
+/* File: mips/OP_UNUSED_41FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_42FF: /* 0x142 */
+/* File: mips/OP_UNUSED_42FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_43FF: /* 0x143 */
+/* File: mips/OP_UNUSED_43FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_44FF: /* 0x144 */
+/* File: mips/OP_UNUSED_44FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_45FF: /* 0x145 */
+/* File: mips/OP_UNUSED_45FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_46FF: /* 0x146 */
+/* File: mips/OP_UNUSED_46FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_47FF: /* 0x147 */
+/* File: mips/OP_UNUSED_47FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_48FF: /* 0x148 */
+/* File: mips/OP_UNUSED_48FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_49FF: /* 0x149 */
+/* File: mips/OP_UNUSED_49FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_4AFF: /* 0x14a */
+/* File: mips/OP_UNUSED_4AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_4BFF: /* 0x14b */
+/* File: mips/OP_UNUSED_4BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_4CFF: /* 0x14c */
+/* File: mips/OP_UNUSED_4CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_4DFF: /* 0x14d */
+/* File: mips/OP_UNUSED_4DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_4EFF: /* 0x14e */
+/* File: mips/OP_UNUSED_4EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_4FFF: /* 0x14f */
+/* File: mips/OP_UNUSED_4FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_50FF: /* 0x150 */
+/* File: mips/OP_UNUSED_50FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_51FF: /* 0x151 */
+/* File: mips/OP_UNUSED_51FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_52FF: /* 0x152 */
+/* File: mips/OP_UNUSED_52FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_53FF: /* 0x153 */
+/* File: mips/OP_UNUSED_53FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_54FF: /* 0x154 */
+/* File: mips/OP_UNUSED_54FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_55FF: /* 0x155 */
+/* File: mips/OP_UNUSED_55FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_56FF: /* 0x156 */
+/* File: mips/OP_UNUSED_56FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_57FF: /* 0x157 */
+/* File: mips/OP_UNUSED_57FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_58FF: /* 0x158 */
+/* File: mips/OP_UNUSED_58FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_59FF: /* 0x159 */
+/* File: mips/OP_UNUSED_59FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_5AFF: /* 0x15a */
+/* File: mips/OP_UNUSED_5AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_5BFF: /* 0x15b */
+/* File: mips/OP_UNUSED_5BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_5CFF: /* 0x15c */
+/* File: mips/OP_UNUSED_5CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_5DFF: /* 0x15d */
+/* File: mips/OP_UNUSED_5DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_5EFF: /* 0x15e */
+/* File: mips/OP_UNUSED_5EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_5FFF: /* 0x15f */
+/* File: mips/OP_UNUSED_5FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_60FF: /* 0x160 */
+/* File: mips/OP_UNUSED_60FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_61FF: /* 0x161 */
+/* File: mips/OP_UNUSED_61FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_62FF: /* 0x162 */
+/* File: mips/OP_UNUSED_62FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_63FF: /* 0x163 */
+/* File: mips/OP_UNUSED_63FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_64FF: /* 0x164 */
+/* File: mips/OP_UNUSED_64FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_65FF: /* 0x165 */
+/* File: mips/OP_UNUSED_65FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_66FF: /* 0x166 */
+/* File: mips/OP_UNUSED_66FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_67FF: /* 0x167 */
+/* File: mips/OP_UNUSED_67FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_68FF: /* 0x168 */
+/* File: mips/OP_UNUSED_68FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_69FF: /* 0x169 */
+/* File: mips/OP_UNUSED_69FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_6AFF: /* 0x16a */
+/* File: mips/OP_UNUSED_6AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_6BFF: /* 0x16b */
+/* File: mips/OP_UNUSED_6BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_6CFF: /* 0x16c */
+/* File: mips/OP_UNUSED_6CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_6DFF: /* 0x16d */
+/* File: mips/OP_UNUSED_6DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_6EFF: /* 0x16e */
+/* File: mips/OP_UNUSED_6EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_6FFF: /* 0x16f */
+/* File: mips/OP_UNUSED_6FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_70FF: /* 0x170 */
+/* File: mips/OP_UNUSED_70FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_71FF: /* 0x171 */
+/* File: mips/OP_UNUSED_71FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_72FF: /* 0x172 */
+/* File: mips/OP_UNUSED_72FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_73FF: /* 0x173 */
+/* File: mips/OP_UNUSED_73FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_74FF: /* 0x174 */
+/* File: mips/OP_UNUSED_74FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_75FF: /* 0x175 */
+/* File: mips/OP_UNUSED_75FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_76FF: /* 0x176 */
+/* File: mips/OP_UNUSED_76FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_77FF: /* 0x177 */
+/* File: mips/OP_UNUSED_77FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_78FF: /* 0x178 */
+/* File: mips/OP_UNUSED_78FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_79FF: /* 0x179 */
+/* File: mips/OP_UNUSED_79FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_7AFF: /* 0x17a */
+/* File: mips/OP_UNUSED_7AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_7BFF: /* 0x17b */
+/* File: mips/OP_UNUSED_7BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_7CFF: /* 0x17c */
+/* File: mips/OP_UNUSED_7CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_7DFF: /* 0x17d */
+/* File: mips/OP_UNUSED_7DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_7EFF: /* 0x17e */
+/* File: mips/OP_UNUSED_7EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_7FFF: /* 0x17f */
+/* File: mips/OP_UNUSED_7FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_80FF: /* 0x180 */
+/* File: mips/OP_UNUSED_80FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_81FF: /* 0x181 */
+/* File: mips/OP_UNUSED_81FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_82FF: /* 0x182 */
+/* File: mips/OP_UNUSED_82FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_83FF: /* 0x183 */
+/* File: mips/OP_UNUSED_83FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_84FF: /* 0x184 */
+/* File: mips/OP_UNUSED_84FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_85FF: /* 0x185 */
+/* File: mips/OP_UNUSED_85FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_86FF: /* 0x186 */
+/* File: mips/OP_UNUSED_86FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_87FF: /* 0x187 */
+/* File: mips/OP_UNUSED_87FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_88FF: /* 0x188 */
+/* File: mips/OP_UNUSED_88FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_89FF: /* 0x189 */
+/* File: mips/OP_UNUSED_89FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_8AFF: /* 0x18a */
+/* File: mips/OP_UNUSED_8AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_8BFF: /* 0x18b */
+/* File: mips/OP_UNUSED_8BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_8CFF: /* 0x18c */
+/* File: mips/OP_UNUSED_8CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_8DFF: /* 0x18d */
+/* File: mips/OP_UNUSED_8DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_8EFF: /* 0x18e */
+/* File: mips/OP_UNUSED_8EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_8FFF: /* 0x18f */
+/* File: mips/OP_UNUSED_8FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_90FF: /* 0x190 */
+/* File: mips/OP_UNUSED_90FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_91FF: /* 0x191 */
+/* File: mips/OP_UNUSED_91FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_92FF: /* 0x192 */
+/* File: mips/OP_UNUSED_92FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_93FF: /* 0x193 */
+/* File: mips/OP_UNUSED_93FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_94FF: /* 0x194 */
+/* File: mips/OP_UNUSED_94FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_95FF: /* 0x195 */
+/* File: mips/OP_UNUSED_95FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_96FF: /* 0x196 */
+/* File: mips/OP_UNUSED_96FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_97FF: /* 0x197 */
+/* File: mips/OP_UNUSED_97FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_98FF: /* 0x198 */
+/* File: mips/OP_UNUSED_98FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_99FF: /* 0x199 */
+/* File: mips/OP_UNUSED_99FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_9AFF: /* 0x19a */
+/* File: mips/OP_UNUSED_9AFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_9BFF: /* 0x19b */
+/* File: mips/OP_UNUSED_9BFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_9CFF: /* 0x19c */
+/* File: mips/OP_UNUSED_9CFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_9DFF: /* 0x19d */
+/* File: mips/OP_UNUSED_9DFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_9EFF: /* 0x19e */
+/* File: mips/OP_UNUSED_9EFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_9FFF: /* 0x19f */
+/* File: mips/OP_UNUSED_9FFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: mips/OP_UNUSED_A0FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: mips/OP_UNUSED_A1FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: mips/OP_UNUSED_A2FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: mips/OP_UNUSED_A3FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: mips/OP_UNUSED_A4FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: mips/OP_UNUSED_A5FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: mips/OP_UNUSED_A6FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: mips/OP_UNUSED_A7FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: mips/OP_UNUSED_A8FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: mips/OP_UNUSED_A9FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: mips/OP_UNUSED_AAFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: mips/OP_UNUSED_ABFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: mips/OP_UNUSED_ACFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: mips/OP_UNUSED_ADFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: mips/OP_UNUSED_AEFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_AFFF: /* 0x1af */
+/* File: mips/OP_UNUSED_AFFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: mips/OP_UNUSED_B0FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: mips/OP_UNUSED_B1FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: mips/OP_UNUSED_B2FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: mips/OP_UNUSED_B3FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: mips/OP_UNUSED_B4FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: mips/OP_UNUSED_B5FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: mips/OP_UNUSED_B6FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: mips/OP_UNUSED_B7FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: mips/OP_UNUSED_B8FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: mips/OP_UNUSED_B9FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: mips/OP_UNUSED_BAFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: mips/OP_UNUSED_BBFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: mips/OP_UNUSED_BCFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: mips/OP_UNUSED_BDFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_BEFF: /* 0x1be */
+/* File: mips/OP_UNUSED_BEFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: mips/OP_UNUSED_BFFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: mips/OP_UNUSED_C0FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: mips/OP_UNUSED_C1FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: mips/OP_UNUSED_C2FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: mips/OP_UNUSED_C3FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: mips/OP_UNUSED_C4FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: mips/OP_UNUSED_C5FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: mips/OP_UNUSED_C6FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: mips/OP_UNUSED_C7FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: mips/OP_UNUSED_C8FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: mips/OP_UNUSED_C9FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: mips/OP_UNUSED_CAFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: mips/OP_UNUSED_CBFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: mips/OP_UNUSED_CCFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: mips/OP_UNUSED_CDFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: mips/OP_UNUSED_CEFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: mips/OP_UNUSED_CFFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: mips/OP_UNUSED_D0FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: mips/OP_UNUSED_D1FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: mips/OP_UNUSED_D2FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: mips/OP_UNUSED_D3FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: mips/OP_UNUSED_D4FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: mips/OP_UNUSED_D5FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: mips/OP_UNUSED_D6FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: mips/OP_UNUSED_D7FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: mips/OP_UNUSED_D8FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: mips/OP_UNUSED_D9FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_DAFF: /* 0x1da */
+/* File: mips/OP_UNUSED_DAFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_DBFF: /* 0x1db */
+/* File: mips/OP_UNUSED_DBFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: mips/OP_UNUSED_DCFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: mips/OP_UNUSED_DDFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_DEFF: /* 0x1de */
+/* File: mips/OP_UNUSED_DEFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_DFFF: /* 0x1df */
+/* File: mips/OP_UNUSED_DFFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: mips/OP_UNUSED_E0FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: mips/OP_UNUSED_E1FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: mips/OP_UNUSED_E2FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: mips/OP_UNUSED_E3FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: mips/OP_UNUSED_E4FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: mips/OP_UNUSED_E5FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: mips/OP_UNUSED_E6FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: mips/OP_UNUSED_E7FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: mips/OP_UNUSED_E8FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: mips/OP_UNUSED_E9FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: mips/OP_UNUSED_EAFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: mips/OP_UNUSED_EBFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: mips/OP_UNUSED_ECFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: mips/OP_UNUSED_EDFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: mips/OP_UNUSED_EEFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: mips/OP_UNUSED_EFFF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: mips/OP_UNUSED_F0FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: mips/OP_UNUSED_F1FF.S */
+/* File: mips/unused.S */
+ BAL(common_abort)
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: mips/OP_INVOKE_OBJECT_INIT_JUMBO.S */
+/* File: mips/OP_INVOKE_OBJECT_INIT_RANGE.S */
+ /*
+ * Invoke Object.<init> on an object. In practice we know that
+ * Object's nullary constructor doesn't do anything, so we just
+ * skip it unless a debugger is active.
+ */
+ FETCH(a1, 4) # a1<- CCCC
+ GET_VREG(a0, a1) # a0<- "this" ptr
+ # check for NULL
+ beqz a0, common_errNullObject # export PC and throw NPE
+ LOAD_base_offObject_clazz(a1, a0) # a1<- obj->clazz
+ LOAD_base_offClassObject_accessFlags(a2, a1) # a2<- clazz->accessFlags
+ and a2, CLASS_ISFINALIZABLE # is this class finalizable?
+ beqz a2, .LOP_INVOKE_OBJECT_INIT_JUMBO_finish # no, go
+
+.LOP_INVOKE_OBJECT_INIT_JUMBO_setFinal:
+ EXPORT_PC() # can throw
+ JAL(dvmSetFinalizable) # call dvmSetFinalizable(obj)
+ LOAD_offThread_exception(a0, rSELF) # a0<- self->exception
+ # exception pending?
+ bnez a0, common_exceptionThrown # yes, handle it
+
+.LOP_INVOKE_OBJECT_INIT_JUMBO_finish:
+ lhu a1, offThread_subMode(rSELF)
+ and a1, kSubModeDebuggerActive # debugger active?
+ bnez a1, .LOP_INVOKE_OBJECT_INIT_JUMBO_debugger # Yes - skip optimization
+ FETCH_ADVANCE_INST(4+1) # advance to next instr, load rINST
+ GET_INST_OPCODE(t0) # t0<- opcode from rINST
+ GOTO_OPCODE(t0) # execute it
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: mips/OP_IGET_VOLATILE_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_VOLATILE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_VOLATILE_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: mips/OP_IGET_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_IGET_WIDE_JUMBO.S */
+ /*
+ * Jumbo 64-bit instance field get.
+ */
+ /* iget-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[CCCC], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_WIDE_VOLATILE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_WIDE_VOLATILE_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: mips/OP_IGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_IGET_OBJECT_JUMBO.S */
+/* File: mips/OP_IGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field get.
+ *
+ * for: iget/jumbo, iget-object/jumbo, iget-boolean/jumbo, iget-byte/jumbo,
+ * iget-char/jumbo, iget-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IGET_OBJECT_VOLATILE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved # resolved, continue
+
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: mips/OP_IPUT_VOLATILE_JUMBO.S */
+/* File: mips/OP_IPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ *
+ * for: iput/jumbo, iput-boolean/jumbo, iput-byte/jumbo, iput-char/jumbo,
+ * iput-short/jumbo
+ */
+ /* exop vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_VOLATILE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_VOLATILE_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: mips/OP_IPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_IPUT_WIDE_JUMBO.S */
+ /* iput-wide/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_WIDE_VOLATILE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved # resolved, continue
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: mips/OP_IPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_IPUT_OBJECT_JUMBO.S */
+ /*
+ * Jumbo 32-bit instance field put.
+ */
+ /* iput-object/jumbo vBBBB, vCCCC, field@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ FETCH(a0, 4) # a0<- CCCC
+ LOAD_rSELF_methodClassDex(a3) # a3 <- DvmDex
+ sll a1,a1,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ LOAD_base_offDvmDex_pResFields(a2, a3) # a2 <- pDvmDex->pResFields
+ GET_VREG(rOBJ, a0) # rOBJ <- fp[B], the object pointer
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved InstField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish # no, already resolved
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveInstField) # v0 <- resolved InstField ptr
+ b .LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: mips/OP_SGET_VOLATILE_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_VOLATILE_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_VOLATILE_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: mips/OP_SGET_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_SGET_WIDE_JUMBO.S */
+ /*
+ * Jumbo 64-bit SGET handler.
+ */
+ /* sget-wide/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(a2, a2) # a2 <- dvmDex->pResFields
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, a2, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry null?
+ bnez a0, .LOP_SGET_WIDE_VOLATILE_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ *
+ * Returns StaticField pointer in v0.
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # a0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+ b .LOP_SGET_WIDE_VOLATILE_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: mips/OP_SGET_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_SGET_OBJECT_JUMBO.S */
+/* File: mips/OP_SGET_JUMBO.S */
+ /*
+ * Jumbo 32-bit SGET handler.
+ *
+ * for: sget/jumbo, sget-object/jumbo, sget-boolean/jumbo, sget-byte/jumbo,
+ * sget-char/jumbo, sget-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ # is resolved entry !null?
+ bnez a0, .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ # success?
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SGET_OBJECT_VOLATILE_JUMBO_finish # resume
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: mips/OP_SPUT_VOLATILE_JUMBO.S */
+/* File: mips/OP_SPUT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler.
+ *
+ * for: sput/jumbo, sput-boolean/jumbo, sput-byte/jumbo, sput-char/jumbo,
+ * sput-short/jumbo
+ */
+ /* exop vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16
+ or a1, a0, a1 # a1<- AAAAaaaa
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_VOLATILE_JUMBO_finish # is resolved entry null?
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_VOLATILE_JUMBO_finish # resume
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: mips/OP_SPUT_WIDE_VOLATILE_JUMBO.S */
+/* File: mips/OP_SPUT_WIDE_JUMBO.S */
+ /*
+ * Jumbo 64-bit SPUT handler.
+ */
+ /* sput-wide/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a2,a2,16
+ or a1, a1, a2 # a1<- AAAAaaaa
+ FETCH(rOBJ, 3) # rOBJ<- BBBB solved StaticField ptr
+ EAS2(rOBJ, rFP, t0) # rOBJ<- &fp[BBBB]
+ # is resolved entry null?
+ beqz a2, .LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve # yes, do resolve
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_finish: # field ptr in a2, BBBB in rOBJ
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- vBBBB/vBBBB+1
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ .if 1
+ addu a2, offStaticField_value # a2<- pointer to data
+ JAL(dvmQuasiAtomicSwap64Sync) # stores a0/a1 into addr a2
+ .else
+ STORE64_off(a0, a1, a2, offStaticField_value) # field <- vBBBB/vBBBB+1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: mips/OP_SPUT_OBJECT_VOLATILE_JUMBO.S */
+/* File: mips/OP_SPUT_OBJECT_JUMBO.S */
+ /*
+ * Jumbo 32-bit SPUT handler for objects
+ */
+ /* sput-object/jumbo vBBBB, field@AAAAAAAA */
+ LOAD_rSELF_methodClassDex(a2) # a2 <- DvmDex
+ FETCH(a0, 1) # a0<- aaaa (lo)
+ FETCH(a1, 2) # a1<- AAAA (hi)
+ LOAD_base_offDvmDex_pResFields(rBIX, a2) # rBIX <- dvmDex->pResFields
+ sll a1,a1,16
+ or a1,a0,a1 # a1<- AAAAaaaa
+
+ LOAD_eas2(a0, rBIX, a1) # a0 <- resolved StaticField ptr
+ bnez a0, .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish # is resolved entry null?
+
+ /* Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rBIX: dvmDex->pResFields
+ */
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() may throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ move a0, v0
+ beqz v0, common_exceptionThrown # success? no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ b .LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish # resume
+
+
+
+/* ------------------------------ */
+ .balign 128
+.L_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: mips/OP_THROW_VERIFICATION_ERROR_JUMBO.S */
+ /*
+ * Handle a jumbo throw-verification-error instruction. This throws an
+ * exception for an error discovered during verification. The
+ * exception is indicated by BBBB, with some detail provided by AAAAAAAA.
+ */
+ /* exop BBBB, Class@AAAAAAAA */
+ FETCH(a1, 1) # a1<- aaaa (lo)
+ FETCH(a2, 2) # a2<- AAAA (hi)
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ sll a2,a2,16
+ or a2, a1, a2 # a2<- AAAAaaaa
+ EXPORT_PC() # export the PC
+ FETCH(a1, 3) # a1<- BBBB
+ JAL(dvmThrowVerificationError) # always throws
+ b common_exceptionThrown # handle exception
+
+
+ .balign 128
+ .size dvmAsmInstructionStart, .-dvmAsmInstructionStart
+ .global dvmAsmInstructionEnd
+dvmAsmInstructionEnd:
+
+/*
+ * ===========================================================================
+ * Sister implementations
+ * ===========================================================================
+ */
+ .global dvmAsmSisterStart
+ .type dvmAsmSisterStart, %function
+ .text
+ .balign 4
+dvmAsmSisterStart:
+
+/* continuation for OP_CHECK_CAST */
+
+.LOP_CHECK_CAST_castfailure:
+    # A cast has failed.  We need to throw a ClassCastException with the
+    # class of the object that failed to be cast.
+    EXPORT_PC()                            # about to throw
+    LOAD_base_offObject_clazz(a0, rOBJ)    # a0 <- obj->clazz
+    move a1,rBIX                           # a1<- desired class (comment said r1; this is MIPS)
+    JAL(dvmThrowClassCastException)
+    b common_exceptionThrown
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a2 holds BBBB
+     *  rOBJ holds object
+     */
+.LOP_CHECK_CAST_resolve:
+    EXPORT_PC()                            # resolve() could throw
+    LOAD_rSELF_method(a3)                  # a3 <- self->method
+    move a1, a2                            # a1 <- BBBB
+    li a2, 0                               # a2 <- false (don't init class)
+    LOAD_base_offMethod_clazz(a0, a3)      # a0 <- method->clazz
+    JAL(dvmResolveClass)                   # v0 <- resolved ClassObject ptr
+    # got null?
+    beqz v0, common_exceptionThrown        # yes, handle exception
+    move a1, v0                            # a1 <- class resolved from BBB
+    LOAD_base_offObject_clazz(a0, rOBJ)    # a0 <- obj->clazz
+    b .LOP_CHECK_CAST_resolved             # pick up where we left off
+
+/* continuation for OP_INSTANCE_OF */
+
+    /*
+     * Trivial test failed, need to perform full check.  This is common.
+     *  a0 holds obj->clazz
+     *  a1 holds class resolved from BBBB
+     *  rOBJ holds A
+     */
+.LOP_INSTANCE_OF_fullcheck:
+    JAL(dvmInstanceofNonTrivial)           # v0 <- boolean result
+    move a0, v0                            # fall through to OP_INSTANCE_OF_store
+    b .LOP_INSTANCE_OF_store
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a3 holds BBBB
+     *  rOBJ holds A
+     */
+.LOP_INSTANCE_OF_resolve:
+    EXPORT_PC()                            # resolve() could throw
+    LOAD_rSELF_method(a0)                  # a0 <- self->method
+    move a1, a3                            # a1 <- BBBB
+    li a2, 1                               # a2 <- true (fromUnverifiedConstant)
+    LOAD_base_offMethod_clazz(a0, a0)      # a0 <- method->clazz
+    JAL(dvmResolveClass)                   # v0 <- resolved ClassObject ptr
+    # got null?
+    move a1, v0                            # a1 <- class resolved from BBB
+    beqz v0, common_exceptionThrown        # yes, handle exception
+    GET_OPB(a3)                            # a3 <- B
+    GET_VREG(a0, a3)                       # a0 <- vB (object)
+    LOAD_base_offObject_clazz(a0, a0)      # a0 <- obj->clazz
+    b .LOP_INSTANCE_OF_resolved            # pick up where we left off
+
+
+/* continuation for OP_NEW_INSTANCE */
+
+.LOP_NEW_INSTANCE_continue:
+    # Allocation succeeded; v0 holds the new object, a3 holds AA.
+    FETCH_ADVANCE_INST(2)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    SET_VREG(v0, a3)                       # vAA <- v0
+    GOTO_OPCODE(t0)                        # jump to next instruction
+
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we need to stop the trace building early.
+     * v0: new object
+     * a3: vAA
+     */
+.LOP_NEW_INSTANCE_jitCheck:
+    lw a1, 0(rBIX)                         # reload resolved class
+    # okay?
+    bnez a1, .LOP_NEW_INSTANCE_continue    # yes, finish
+    move rOBJ, v0                          # preserve new object
+    move rBIX, a3                          # preserve vAA
+    move a0, rSELF
+    move a1, rPC
+    JAL(dvmJitEndTraceSelect)              # (self, pc)
+    FETCH_ADVANCE_INST(2)                  # advance rPC, load rINST
+    GET_INST_OPCODE(t0)                    # extract opcode from rINST
+    SET_VREG(rOBJ, rBIX)                   # vAA <- new object
+    GOTO_OPCODE(t0)                        # jump to next instruction
+#endif
+
+    /*
+     * Class initialization required.
+     *
+     *  a0 holds class object
+     */
+.LOP_NEW_INSTANCE_needinit:
+    JAL(dvmInitClass)                      # initialize class
+    move a0, rOBJ                          # restore a0
+    # check boolean result
+    bnez v0, .LOP_NEW_INSTANCE_initialized # success, continue
+    b common_exceptionThrown               # failed, deal with init exception
+
+
+    /*
+     * Resolution required.  This is the least-likely path.
+     *
+     *  a1 holds BBBB
+     */
+.LOP_NEW_INSTANCE_resolve:
+    LOAD_rSELF_method(a3)                  # a3 <- self->method
+    li a2, 0                               # a2 <- false (don't init class here)
+    LOAD_base_offMethod_clazz(a0, a3)      # a0 <- method->clazz
+    JAL(dvmResolveClass)                   # v0 <- resolved ClassObject ptr
+    move a0, v0
+    # got null?
+    bnez v0, .LOP_NEW_INSTANCE_resolved    # no, continue
+    b common_exceptionThrown               # yes, handle exception
+
+/* continuation for OP_NEW_ARRAY */
+
+    /*
+     * Resolve class.  (This is an uncommon case.)
+     *
+     *  a1 holds array length
+     *  a2 holds class ref CCCC
+     */
+.LOP_NEW_ARRAY_resolve:
+    LOAD_rSELF_method(a3)                  # a3 <- self->method
+    move rOBJ, a1                          # rOBJ <- length (save across the call)
+    move a1, a2                            # a1 <- CCCC
+    li a2, 0                               # a2 <- false
+    LOAD_base_offMethod_clazz(a0, a3)      # a0 <- method->clazz
+    JAL(dvmResolveClass)                   # v0 <- call(clazz, ref)
+    move a1, rOBJ                          # a1 <- length (restore)
+    # got null?
+    beqz v0, common_exceptionThrown        # yes, handle exception
+    move a0, v0
+    b .LOP_NEW_ARRAY_finish                # continue with OP_NEW_ARRAY_finish
+
+
+
+/* continuation for OP_FILLED_NEW_ARRAY */
+
+    /*
+     * On entry:
+     *  a0 holds array class
+     *  rOBJ holds AA or BA
+     */
+.LOP_FILLED_NEW_ARRAY_continue:
+    LOAD_base_offClassObject_descriptor(a3, a0) # a3 <- arrayClass->descriptor
+    li a2, ALLOC_DONT_TRACK                # a2 <- alloc flags
+    lbu rINST, 1(a3)                       # rINST <- descriptor[1] (element type char)
+    .if 0
+    move a1, rOBJ                          # a1 <- AA (length)
+    .else
+    srl a1, rOBJ, 4                        # rOBJ <- B (length)
+    .endif
+    # only int ('I'), object ('L') and array ('[') element types are handled
+    seq t0, rINST, 'I'                     # array of ints?
+    seq t1, rINST, 'L'                     # array of objects?
+    or t0, t1
+    seq t1, rINST, '['                     # array of arrays?
+    or t0, t1
+    move rBIX, a1                          # save length in rBIX
+    beqz t0, .LOP_FILLED_NEW_ARRAY_notimpl # no, not handled yet
+    JAL(dvmAllocArrayByClass)              # v0 <- call(arClass, length, flags)
+    # null return?
+    beqz v0, common_exceptionThrown        # alloc failed, handle exception
+
+    FETCH(a1, 2)                           # a1 <- FEDC or CCCC
+    sw v0, offThread_retval(rSELF)         # retval.l <- new array
+    sw rINST, (offThread_retval+4)(rSELF)  # retval.h <- type
+    addu a0, v0, offArrayObject_contents   # a0 <- newArray->contents
+    subu rBIX, rBIX, 1                     # length--, check for neg
+    FETCH_ADVANCE_INST(3)                  # advance to next instr, load rINST
+    bltz rBIX, 2f                          # was zero, bail
+
+    # copy values from registers into the array
+    # a0=array, a1=CCCC/FEDC, t0=length (from AA or B), rOBJ=AA/BA
+    move t0, rBIX
+    .if 0
+    EAS2(a2, rFP, a1)                      # a2 <- &fp[CCCC]
+1:
+    lw a3, 0(a2)                           # a3 <- *a2++
+    addu a2, 4
+    subu t0, t0, 1                         # count--
+    sw a3, (a0)                            # *contents++ = vX
+    addu a0, 4
+    bgez t0, 1b
+
+    # continue at 2
+    .else
+    slt t1, t0, 4                          # length was initially 5?
+    and a2, rOBJ, 15                       # a2 <- A
+    bnez t1, 1f                            # <= 4 args, branch
+    GET_VREG(a3, a2)                       # a3 <- vA
+    subu t0, t0, 1                         # count--
+    sw a3, 16(a0)                          # contents[4] = vA
+1:
+    and a2, a1, 15                         # a2 <- F/E/D/C
+    GET_VREG(a3, a2)                       # a3 <- vF/vE/vD/vC
+    srl a1, a1, 4                          # a1 <- next reg in low 4
+    subu t0, t0, 1                         # count--
+    sw a3, 0(a0)                           # *contents++ = vX
+    addu a0, a0, 4
+    bgez t0, 1b
+    # continue at 2
+    .endif
+
+2:
+    lw a0, offThread_retval(rSELF)         # a0 <- object
+    lw a1, (offThread_retval+4)(rSELF)     # a1 <- type
+    seq t1, a1, 'I'                        # Is int array?
+    bnez t1, 3f
+    # mark the GC card for a reference-bearing array; storing the low byte of
+    # the (biased) card-table base is the dirty-card idiom used throughout mterp
+    # NOTE(review): presumably a2's low byte equals GC_CARD_DIRTY per the biased
+    # card-table scheme -- confirm against the ARM port's strb pattern
+    lw a2, offThread_cardTable(rSELF)      # a2 <- card table base
+    srl t3, a0, GC_CARD_SHIFT
+    addu t2, a2, t3
+    sb a2, (t2)
+3:
+    GET_INST_OPCODE(t0)                    # ip <- opcode from rINST
+    GOTO_OPCODE(t0)                        # execute it
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_notimpl:
+    la a0, .LstrFilledNewArrayNotImpl
+    JAL(dvmThrowInternalError)
+    b common_exceptionThrown
+
+    /*
+     * Ideally we'd only define this once, but depending on layout we can
+     * exceed the range of the load above.
+     */
+
+
+/* continuation for OP_FILLED_NEW_ARRAY_RANGE */
+
+    /*
+     * On entry:
+     *  a0 holds array class
+     *  rOBJ holds AA or BA
+     */
+    # Generated from the same template as .LOP_FILLED_NEW_ARRAY_continue with
+    # the range flag set (.if 1), so the &fp[CCCC] copy loop is assembled here.
+.LOP_FILLED_NEW_ARRAY_RANGE_continue:
+    LOAD_base_offClassObject_descriptor(a3, a0) # a3 <- arrayClass->descriptor
+    li a2, ALLOC_DONT_TRACK                # a2 <- alloc flags
+    lbu rINST, 1(a3)                       # rINST <- descriptor[1] (element type char)
+    .if 1
+    move a1, rOBJ                          # a1 <- AA (length)
+    .else
+    srl a1, rOBJ, 4                        # rOBJ <- B (length)
+    .endif
+    seq t0, rINST, 'I'                     # array of ints?
+    seq t1, rINST, 'L'                     # array of objects?
+    or t0, t1
+    seq t1, rINST, '['                     # array of arrays?
+    or t0, t1
+    move rBIX, a1                          # save length in rBIX
+    beqz t0, .LOP_FILLED_NEW_ARRAY_RANGE_notimpl # no, not handled yet
+    JAL(dvmAllocArrayByClass)              # v0 <- call(arClass, length, flags)
+    # null return?
+    beqz v0, common_exceptionThrown        # alloc failed, handle exception
+
+    FETCH(a1, 2)                           # a1 <- FEDC or CCCC
+    sw v0, offThread_retval(rSELF)         # retval.l <- new array
+    sw rINST, (offThread_retval+4)(rSELF)  # retval.h <- type
+    addu a0, v0, offArrayObject_contents   # a0 <- newArray->contents
+    subu rBIX, rBIX, 1                     # length--, check for neg
+    FETCH_ADVANCE_INST(3)                  # advance to next instr, load rINST
+    bltz rBIX, 2f                          # was zero, bail
+
+    # copy values from registers into the array
+    # a0=array, a1=CCCC/FEDC, t0=length (from AA or B), rOBJ=AA/BA
+    move t0, rBIX
+    .if 1
+    EAS2(a2, rFP, a1)                      # a2 <- &fp[CCCC]
+1:
+    lw a3, 0(a2)                           # a3 <- *a2++
+    addu a2, 4
+    subu t0, t0, 1                         # count--
+    sw a3, (a0)                            # *contents++ = vX
+    addu a0, 4
+    bgez t0, 1b
+
+    # continue at 2
+    .else
+    slt t1, t0, 4                          # length was initially 5?
+    and a2, rOBJ, 15                       # a2 <- A
+    bnez t1, 1f                            # <= 4 args, branch
+    GET_VREG(a3, a2)                       # a3 <- vA
+    subu t0, t0, 1                         # count--
+    sw a3, 16(a0)                          # contents[4] = vA
+1:
+    and a2, a1, 15                         # a2 <- F/E/D/C
+    GET_VREG(a3, a2)                       # a3 <- vF/vE/vD/vC
+    srl a1, a1, 4                          # a1 <- next reg in low 4
+    subu t0, t0, 1                         # count--
+    sw a3, 0(a0)                           # *contents++ = vX
+    addu a0, a0, 4
+    bgez t0, 1b
+    # continue at 2
+    .endif
+
+2:
+    lw a0, offThread_retval(rSELF)         # a0 <- object
+    lw a1, (offThread_retval+4)(rSELF)     # a1 <- type
+    seq t1, a1, 'I'                        # Is int array?
+    bnez t1, 3f
+    # dirty the GC card for reference-bearing arrays (biased card-table store)
+    lw a2, offThread_cardTable(rSELF)      # a2 <- card table base
+    srl t3, a0, GC_CARD_SHIFT
+    addu t2, a2, t3
+    sb a2, (t2)
+3:
+    GET_INST_OPCODE(t0)                    # ip <- opcode from rINST
+    GOTO_OPCODE(t0)                        # execute it
+
+
+    /*
+     * Throw an exception indicating that we have not implemented this
+     * mode of filled-new-array.
+     */
+.LOP_FILLED_NEW_ARRAY_RANGE_notimpl:
+    la a0, .LstrFilledNewArrayNotImpl
+    JAL(dvmThrowInternalError)
+    b common_exceptionThrown
+
+    /*
+     * Ideally we'd only define this once, but depending on layout we can
+     * exceed the range of the load above.
+     */
+
+
+/* continuation for OP_CMPL_FLOAT */
+
+    # NaN result for cmpl: the "l" bias makes NaN compare as -1.
+OP_CMPL_FLOAT_nan:
+    li        rTEMP, -1
+    b         OP_CMPL_FLOAT_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPL_FLOAT_continue:
+    JAL(__gtsf2)                                #  v0 <- (vBB > vCC)
+    li        rTEMP, 1                          #  rTEMP = 1 if v0 != 0
+    bgtz      v0, OP_CMPL_FLOAT_finish          #  vBB > vCC: result is 1
+    b         OP_CMPL_FLOAT_nan                 #  otherwise unordered here: -1
+#endif
+
+OP_CMPL_FLOAT_finish:
+    GET_OPA(t0)
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    SET_VREG(rTEMP, t0)                         #  vAA <- rTEMP
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    GOTO_OPCODE(t0)
+
+/* continuation for OP_CMPG_FLOAT */
+
+    # NaN result for cmpg: the "g" bias makes NaN compare as +1.
+OP_CMPG_FLOAT_nan:
+    li        rTEMP, 1
+    b         OP_CMPG_FLOAT_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPG_FLOAT_continue:
+    JAL(__gtsf2)                                #  v0 <- (vBB > vCC)
+    li        rTEMP, 1                          #  rTEMP = 1 if v0 != 0
+    bgtz      v0, OP_CMPG_FLOAT_finish
+    b         OP_CMPG_FLOAT_nan
+#endif
+
+OP_CMPG_FLOAT_finish:
+    GET_OPA(t0)
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    SET_VREG(rTEMP, t0)                         #  vAA <- rTEMP
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    GOTO_OPCODE(t0)
+
+/* continuation for OP_CMPL_DOUBLE */
+
+OP_CMPL_DOUBLE_nan:
+    li        rTEMP, -1
+    b         OP_CMPL_DOUBLE_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPL_DOUBLE_continue:
+    LOAD64(rARG2, rARG3, rBIX)                  #  a2/a3 <- vCC/vCC+1
+    JAL(__gtdf2)                                #  fallthru
+    li        rTEMP, 1                          #  rTEMP = 1 if v0 != 0
+    blez      v0, OP_CMPL_DOUBLE_nan            #  fall thru for finish
+#endif
+
+OP_CMPL_DOUBLE_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)              #  vAA <- rTEMP
+
+/* continuation for OP_CMPG_DOUBLE */
+
+OP_CMPG_DOUBLE_nan:
+    li        rTEMP, 1
+    b         OP_CMPG_DOUBLE_finish
+
+#ifdef SOFT_FLOAT
+OP_CMPG_DOUBLE_continue:
+    LOAD64(rARG2, rARG3, rBIX)                  #  a2/a3 <- vCC/vCC+1
+    JAL(__gtdf2)                                #  fallthru
+    li        rTEMP, 1                          #  rTEMP = 1 if v0 != 0
+    blez      v0, OP_CMPG_DOUBLE_nan            #  fall thru for finish
+#endif
+
+OP_CMPG_DOUBLE_finish:
+    GET_OPA(rOBJ)
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(rTEMP, rOBJ, t0)              #  vAA <- rTEMP
+
+
+/* continuation for OP_APUT_OBJECT */
+    # Slow path: verify the stored object's type is assignable to the array's
+    # element type, then mark the GC card for the array.
+    # rBIX = object being stored, rINST = array object.
+.LOP_APUT_OBJECT_checks:
+    LOAD_base_offObject_clazz(a0, rBIX)         #  a0 <- obj->clazz
+    LOAD_base_offObject_clazz(a1, rINST)        #  a1 <- arrayObj->clazz
+    JAL(dvmCanPutArrayElement)                  #  test object type vs. array type
+    beqz      v0, .LOP_APUT_OBJECT_throw        #  not assignable: throw
+    lw        a2, offThread_cardTable(rSELF)
+    srl       t1, rINST, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                          #  mark card (low byte of biased base)
+    b         .LOP_APUT_OBJECT_finish           #  yes, do the store
+.LOP_APUT_OBJECT_throw:
+    LOAD_base_offObject_clazz(a0, rBIX)         #  a0 <- obj->clazz
+    LOAD_base_offObject_clazz(a1, rINST)        #  a1 <- arrayObj->clazz
+    EXPORT_PC()
+    JAL(dvmThrowArrayStoreExceptionIncompatibleElement)
+    b         common_exceptionThrown
+
+
+/* continuation for OP_IGET */
+
+    /*
+     * Load a 32-bit (or narrower) instance field into vA after the field
+     * has been resolved.  The BOOLEAN/BYTE/CHAR/SHORT/OBJECT variants
+     * below are identical generated copies of this fragment.
+     *
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    addu      a3, a3, rOBJ                      #  form address
+    lw        a0, (a3)                          #  a0 <- obj.field (8/16/32 bits)
+    # noop                                      #  acquiring load
+    GET_OPA4(a2)                                #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG(a0, a2)                            #  fp[A] <- a0
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE */
+
+    /*
+     * Load a 64-bit instance field into vA/vA+1.
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IGET_WIDE_finish:
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    beqz      rOBJ, common_errNullObject        #  object was null
+    GET_OPA4(a2)                                #  a2 <- A+
+    addu      rOBJ, rOBJ, a3                    #  form address
+    .if 0
+    vLOAD64(a0, a1, rOBJ)                       #  a0/a1 <- obj.field (64-bit align ok)
+    .else
+    LOAD64(a0, a1, rOBJ)                        #  a0/a1 <- obj.field (64-bit align ok)
+    .endif
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    EAS2(a3, rFP, a2)                           #  a3 <- &fp[A]
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    STORE64(a0, a1, a3)                         #  fp[A] <- a0/a1
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_OBJECT_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    addu      a3, a3, rOBJ                      #  form address
+    lw        a0, (a3)                          #  a0 <- obj.field (8/16/32 bits)
+    # noop                                      #  acquiring load
+    GET_OPA4(a2)                                #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG(a0, a2)                            #  fp[A] <- a0
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IGET_BOOLEAN */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_BOOLEAN_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    addu      a3, a3, rOBJ                      #  form address
+    lw        a0, (a3)                          #  a0 <- obj.field (8/16/32 bits)
+    # noop                                      #  acquiring load
+    GET_OPA4(a2)                                #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG(a0, a2)                            #  fp[A] <- a0
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IGET_BYTE */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_BYTE_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    addu      a3, a3, rOBJ                      #  form address
+    lw        a0, (a3)                          #  a0 <- obj.field (8/16/32 bits)
+    # noop                                      #  acquiring load
+    GET_OPA4(a2)                                #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG(a0, a2)                            #  fp[A] <- a0
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IGET_CHAR */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_CHAR_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    addu      a3, a3, rOBJ                      #  form address
+    lw        a0, (a3)                          #  a0 <- obj.field (8/16/32 bits)
+    # noop                                      #  acquiring load
+    GET_OPA4(a2)                                #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG(a0, a2)                            #  fp[A] <- a0
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IGET_SHORT */
+
+    /*
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_SHORT_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    addu      a3, a3, rOBJ                      #  form address
+    lw        a0, (a3)                          #  a0 <- obj.field (8/16/32 bits)
+    # noop                                      #  acquiring load
+    GET_OPA4(a2)                                #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG(a0, a2)                            #  fp[A] <- a0
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+
+/* continuation for OP_IPUT */
+
+    /*
+     * Store vA into a 32-bit (or narrower) instance field after the field
+     * has been resolved.  BOOLEAN/BYTE/CHAR/SHORT below are identical
+     * generated copies; OBJECT adds a card-table mark.
+     *
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                                #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                            #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3                    #  form address
+    # noop                                      #  releasing store
+    sw        a0, (rOBJ)                        #  obj.field (8/16/32 bits) <- a0
+    # noop
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_WIDE_finish:
+    GET_OPA4(a2)                                #  a2 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    EAS2(a2, rFP, a2)                           #  a2 <- &fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    LOAD64(a0, a1, a2)                          #  a0/a1 <- fp[A]
+    GET_INST_OPCODE(rBIX)                       #  extract opcode from rINST (t-regs clobbered below)
+    addu      a2, rOBJ, a3                      #  form address
+    .if 0
+    JAL(dvmQuasiAtomicSwap64Sync)               #  stores r0/r1 into addr r2
+#    STORE64(a0, a1, a2)                        #  obj.field (64 bits, aligned) <- a0 a1
+    .else
+    STORE64(a0, a1, a2)                         #  obj.field (64 bits, aligned) <- a0 a1
+    .endif
+    GOTO_OPCODE(rBIX)                           #  jump to next instruction
+
+
+/* continuation for OP_IPUT_OBJECT */
+
+    /*
+     * Reference store: also dirties the GC card for the object, unless a
+     * null reference was stored.
+     *
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_OBJECT_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                                #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                            #  a0 <- fp[A]
+    lw        a2, offThread_cardTable(rSELF)    #  a2 <- card table base
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    addu      t2, rOBJ, a3                      #  form address
+    # noop                                      #  releasing store
+    sw        a0, (t2)                          #  obj.field (32 bits) <- a0
+    # noop
+    beqz      a0, 1f                            #  stored a null reference?
+    srl       t1, rOBJ, GC_CARD_SHIFT
+    addu      t2, a2, t1
+    sb        a2, (t2)                          #  mark card if not
+1:
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IPUT_BOOLEAN */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_BOOLEAN_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                                #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                            #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3                    #  form address
+    # noop                                      #  releasing store
+    sw        a0, (rOBJ)                        #  obj.field (8/16/32 bits) <- a0
+    # noop
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IPUT_BYTE */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_BYTE_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                                #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                            #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3                    #  form address
+    # noop                                      #  releasing store
+    sw        a0, (rOBJ)                        #  obj.field (8/16/32 bits) <- a0
+    # noop
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IPUT_CHAR */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_CHAR_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                                #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                            #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3                    #  form address
+    # noop                                      #  releasing store
+    sw        a0, (rOBJ)                        #  obj.field (8/16/32 bits) <- a0
+    # noop
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IPUT_SHORT */
+
+    /*
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_SHORT_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                                #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                            #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3                    #  form address
+    # noop                                      #  releasing store
+    sw        a0, (rOBJ)                        #  obj.field (8/16/32 bits) <- a0
+    # noop
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+
+
+/* continuation for OP_SGET */
+
+    # Load a resolved static field's 32-bit value into vAA.  The WIDE/OBJECT/
+    # BOOLEAN/BYTE/CHAR/SHORT fragments below are generated variants.
+    # On entry: a0 = resolved StaticField ptr.
+.LOP_SGET_finish:
+    LOAD_base_offStaticField_value(a1, a0)      #  a1 <- field value
+    # no-op                                     #  acquiring load
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)                   #  fp[AA] <- a1
+
+/* continuation for OP_SGET_WIDE */
+
+.LOP_SGET_WIDE_finish:
+    GET_OPA(a1)                                 #  a1 <- AA
+    .if 0
+    vLOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .else
+    LOAD64_off(a2, a3, a0, offStaticField_value) #  a2/a3 <- field value (aligned)
+    .endif
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    EAS2(a1, rFP, a1)                           #  a1 <- &fp[AA]
+    STORE64(a2, a3, a1)                         #  vAA/vAA+1 <- a2/a3
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+
+/* continuation for OP_SGET_OBJECT */
+
+.LOP_SGET_OBJECT_finish:
+    LOAD_base_offStaticField_value(a1, a0)      #  a1 <- field value
+    # no-op                                     #  acquiring load
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)                   #  fp[AA] <- a1
+
+/* continuation for OP_SGET_BOOLEAN */
+
+.LOP_SGET_BOOLEAN_finish:
+    LOAD_base_offStaticField_value(a1, a0)      #  a1 <- field value
+    # no-op                                     #  acquiring load
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)                   #  fp[AA] <- a1
+
+/* continuation for OP_SGET_BYTE */
+
+.LOP_SGET_BYTE_finish:
+    LOAD_base_offStaticField_value(a1, a0)      #  a1 <- field value
+    # no-op                                     #  acquiring load
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)                   #  fp[AA] <- a1
+
+/* continuation for OP_SGET_CHAR */
+
+.LOP_SGET_CHAR_finish:
+    LOAD_base_offStaticField_value(a1, a0)      #  a1 <- field value
+    # no-op                                     #  acquiring load
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)                   #  fp[AA] <- a1
+
+/* continuation for OP_SGET_SHORT */
+
+.LOP_SGET_SHORT_finish:
+    LOAD_base_offStaticField_value(a1, a0)      #  a1 <- field value
+    # no-op                                     #  acquiring load
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)                   #  fp[AA] <- a1
+
+
+/* continuation for OP_SPUT */
+
+    # Store vAA into a resolved static field (32-bit).  BOOLEAN/BYTE/CHAR/
+    # SHORT below are generated copies; OBJECT adds a card mark; WIDE has a
+    # resolve path instead.
+.LOP_SPUT_finish:
+    # field ptr in a0
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_VREG(a1, a2)                            #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    # no-op                                     #  releasing store
+    sw        a1, offStaticField_value(a0)      #  field <- vAA
+    # no-op
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+/* continuation for OP_SPUT_WIDE */
+
+    /*
+     * Continuation if the field has not yet been resolved.
+     *  a1:  BBBB field ref
+     *  rOBJ:  &fp[AA]
+     *  rBIX: dvmDex->pResFields
+     *
+     * Returns StaticField pointer in a2.
+     */
+.LOP_SPUT_WIDE_resolve:
+    LOAD_rSELF_method(a2)                       #  a2 <- current method
+#if defined(WITH_JIT)
+    EAS2(rBIX, rBIX, a1)                        #  rBIX<- &dvmDex->pResFields[field]
+#endif
+    EXPORT_PC()                                 #  resolve() could throw, so export now
+    LOAD_base_offMethod_clazz(a0, a2)           #  a0 <- method->clazz
+    JAL(dvmResolveStaticField)                  #  v0 <- resolved StaticField ptr
+    # success ?
+    move      a0, v0
+    beqz      v0, common_exceptionThrown        #  no, handle exception
+#if defined(WITH_JIT)
+    /*
+     * If the JIT is actively building a trace we need to make sure
+     * that the field is fully resolved before including this instruction.
+     */
+    JAL(common_verifyField)
+#endif
+    move      a2, v0
+    b         .LOP_SPUT_WIDE_finish             #  resume
+
+/* continuation for OP_SPUT_OBJECT */
+    # Reference store: dirty the card for the field's declaring class
+    # (t1 = field->clazz), unless a null reference was stored.
+.LOP_SPUT_OBJECT_finish:                        #  field ptr in a0
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_VREG(a1, a2)                            #  a1 <- fp[AA]
+    lw        a2, offThread_cardTable(rSELF)    #  a2 <- card table base
+    lw        t1, offField_clazz(a0)            #  t1 <- field->clazz
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    # no-op                                     #  releasing store
+    sw        a1, offStaticField_value(a0)      #  field <- vAA
+    # no-op
+    beqz      a1, 1f
+    srl       t2, t1, GC_CARD_SHIFT
+    addu      t3, a2, t2
+    sb        a2, (t3)
+1:
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+/* continuation for OP_SPUT_BOOLEAN */
+
+.LOP_SPUT_BOOLEAN_finish:
+    # field ptr in a0
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_VREG(a1, a2)                            #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    # no-op                                     #  releasing store
+    sw        a1, offStaticField_value(a0)      #  field <- vAA
+    # no-op
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+/* continuation for OP_SPUT_BYTE */
+
+.LOP_SPUT_BYTE_finish:
+    # field ptr in a0
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_VREG(a1, a2)                            #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    # no-op                                     #  releasing store
+    sw        a1, offStaticField_value(a0)      #  field <- vAA
+    # no-op
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+/* continuation for OP_SPUT_CHAR */
+
+.LOP_SPUT_CHAR_finish:
+    # field ptr in a0
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_VREG(a1, a2)                            #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    # no-op                                     #  releasing store
+    sw        a1, offStaticField_value(a0)      #  field <- vAA
+    # no-op
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+/* continuation for OP_SPUT_SHORT */
+
+.LOP_SPUT_SHORT_finish:
+    # field ptr in a0
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_VREG(a1, a2)                            #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    # no-op                                     #  releasing store
+    sw        a1, offStaticField_value(a0)      #  field <- vAA
+    # no-op
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_INVOKE_VIRTUAL */
+
+    /*
+     * Virtual dispatch: look the method up in "this"'s vtable.
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.LOP_INVOKE_VIRTUAL_continue:
+    GET_VREG(rOBJ, rBIX)                        #  rOBJ <- "this" ptr
+    LOADu2_offMethod_methodIndex(a2, a0)        #  a2 <- baseMethod->methodIndex
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject        #  null "this", throw exception
+    LOAD_base_offObject_clazz(a3, rOBJ)         #  a3 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a3, a3)     #  a3 <- thisPtr->clazz->vtable
+    LOAD_eas2(a0, a3, a2)                       #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodNoRange        #  (a0=method, rOBJ="this")
+
+
+/* continuation for OP_INVOKE_SUPER */
+
+    /*
+     * Super dispatch: index into the superclass vtable, bounds-checked.
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX = method->clazz
+     */
+.LOP_INVOKE_SUPER_continue:
+    LOAD_base_offClassObject_super(a1, rBIX)    #  a1 <- method->clazz->super
+    LOADu2_offMethod_methodIndex(a2, a0)        #  a2 <- baseMethod->methodIndex
+    LOAD_base_offClassObject_vtableCount(a3, a1) #  a3 <- super->vtableCount
+    EXPORT_PC()                                 #  must export for invoke
+    # compare (methodIndex, vtableCount)
+    bgeu      a2, a3, .LOP_INVOKE_SUPER_nsm     #  method not present in superclass
+    LOAD_base_offClassObject_vtable(a1, a1)     #  a1 <- ...clazz->super->vtable
+    LOAD_eas2(a0, a1, a2)                       #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodNoRange        #  continue on
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  a0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_nsm:
+    LOAD_base_offMethod_name(a1, a0)            #  a1 <- method name
+    b         common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_STATIC */
+
+.LOP_INVOKE_STATIC_resolve:
+    LOAD_rSELF_method(a3)                       #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)           #  a0 <- method->clazz
+    li        a2, METHOD_STATIC                 #  resolver method type
+    JAL(dvmResolveMethod)                       #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we're actively building a trace.  If so,
+     * we need to keep this instruction out of it.
+     * rBIX: &resolved_methodToCall
+     */
+    lhu       a2, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown        #  null, handle exception
+    and       a2, kSubModeJitTraceBuild         #  trace under construction?
+    beqz      a2, common_invokeMethodNoRange    #  no, (a0=method, rOBJ="this")
+    lw        a1, 0(rBIX)                       #  reload resolved method
+    # finished resolving?
+    bnez      a1, common_invokeMethodNoRange    #  yes, (a0=method, rOBJ="this")
+    move      rBIX, a0                          #  preserve method
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)                   #  (self, pc)
+    move      a0, rBIX
+    b         common_invokeMethodNoRange        #  whew, finally!
+#else
+    # got null?
+    bnez      v0, common_invokeMethodNoRange    #  (a0=method, rOBJ="this")
+    b         common_exceptionThrown            #  yes, handle exception
+#endif
+
+
+/* continuation for OP_INVOKE_VIRTUAL_RANGE */
+
+    /*
+     * Range variant of virtual dispatch — identical to the non-range
+     * fragment except it tail-branches to common_invokeMethodRange.
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+     */
+.LOP_INVOKE_VIRTUAL_RANGE_continue:
+    GET_VREG(rOBJ, rBIX)                        #  rOBJ <- "this" ptr
+    LOADu2_offMethod_methodIndex(a2, a0)        #  a2 <- baseMethod->methodIndex
+    # is "this" null?
+    beqz      rOBJ, common_errNullObject        #  null "this", throw exception
+    LOAD_base_offObject_clazz(a3, rOBJ)         #  a3 <- thisPtr->clazz
+    LOAD_base_offClassObject_vtable(a3, a3)     #  a3 <- thisPtr->clazz->vtable
+    LOAD_eas2(a0, a3, a2)                       #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodRange          #  (a0=method, rOBJ="this")
+
+
+/* continuation for OP_INVOKE_SUPER_RANGE */
+
+    /*
+     * At this point:
+     *  a0 = resolved base method
+     *  rBIX = method->clazz
+     */
+.LOP_INVOKE_SUPER_RANGE_continue:
+    LOAD_base_offClassObject_super(a1, rBIX)    #  a1 <- method->clazz->super
+    LOADu2_offMethod_methodIndex(a2, a0)        #  a2 <- baseMethod->methodIndex
+    LOAD_base_offClassObject_vtableCount(a3, a1) #  a3 <- super->vtableCount
+    EXPORT_PC()                                 #  must export for invoke
+    # compare (methodIndex, vtableCount)
+    bgeu      a2, a3, .LOP_INVOKE_SUPER_RANGE_nsm #  method not present in superclass
+    LOAD_base_offClassObject_vtable(a1, a1)     #  a1 <- ...clazz->super->vtable
+    LOAD_eas2(a0, a1, a2)                       #  a0 <- vtable[methodIndex]
+    b         common_invokeMethodRange          #  continue on
+
+    /*
+     * Throw a NoSuchMethodError with the method name as the message.
+     *  a0 = resolved base method
+     */
+.LOP_INVOKE_SUPER_RANGE_nsm:
+    LOAD_base_offMethod_name(a1, a0)            #  a1 <- method name
+    b         common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_STATIC_RANGE */
+
+.LOP_INVOKE_STATIC_RANGE_resolve:
+    LOAD_rSELF_method(a3)                       #  a3 <- self->method
+    LOAD_base_offMethod_clazz(a0, a3)           #  a0 <- method->clazz
+    li        a2, METHOD_STATIC                 #  resolver method type
+    JAL(dvmResolveMethod)                       #  v0 <- call(clazz, ref, flags)
+    move      a0, v0
+#if defined(WITH_JIT)
+    /*
+     * Check to see if we're actively building a trace.  If so,
+     * we need to keep this instruction out of it.
+     * rBIX: &resolved_methodToCall
+     */
+    lhu       a2, offThread_subMode(rSELF)
+    beqz      v0, common_exceptionThrown        #  null, handle exception
+    and       a2, kSubModeJitTraceBuild         #  trace under construction?
+    beqz      a2, common_invokeMethodRange      #  no, (a0=method, rOBJ="this")
+    lw        a1, 0(rBIX)                       #  reload resolved method
+    # finished resolving?
+    bnez      a1, common_invokeMethodRange      #  yes, (a0=method, rOBJ="this")
+    move      rBIX, a0                          #  preserve method
+    move      a0, rSELF
+    move      a1, rPC
+    JAL(dvmJitEndTraceSelect)                   #  (self, pc)
+    move      a0, rBIX
+    b         common_invokeMethodRange          #  whew, finally!
+#else
+    # got null?
+    bnez      v0, common_invokeMethodRange      #  (a0=method, rOBJ="this")
+    b         common_exceptionThrown            #  yes, handle exception
+#endif
+
+
+/* continuation for OP_FLOAT_TO_INT */
+
+/*
+ * Out-of-range float→int conversion, implementing Dalvik semantics:
+ * clamp to maxint/minint, NaN converts to 0.
+ * Not an entry point as it is used only once !!
+ */
+f2i_doconv:
+#ifdef SOFT_FLOAT
+    li        a1, 0x4f000000                    #  (float)maxint
+    move      rBIX, a0
+    JAL(__gesf2)                                #  is arg >= maxint?
+    move      t0, v0
+    li        v0, ~0x80000000                   #  return maxint (7fffffff)
+    bgez      t0, .LOP_FLOAT_TO_INT_set_vreg
+
+    move      a0, rBIX                          #  recover arg
+    li        a1, 0xcf000000                    #  (float)minint
+    JAL(__lesf2)
+
+    move      t0, v0
+    li        v0, 0x80000000                    #  return minint (80000000)
+    blez      t0, .LOP_FLOAT_TO_INT_set_vreg
+    move      a0, rBIX
+    move      a1, rBIX
+    JAL(__nesf2)                                #  NaN check: NaN != itself
+
+    move      t0, v0
+    li        v0, 0                             #  return zero for NaN
+    bnez      t0, .LOP_FLOAT_TO_INT_set_vreg
+
+    move      a0, rBIX
+    JAL(__fixsfsi)                              #  in-range: truncate
+    b         .LOP_FLOAT_TO_INT_set_vreg
+#else
+    l.s       fa1, .LFLOAT_TO_INT_max
+    c.ole.s   fcc0, fa1, fa0                    #  arg >= maxint?
+    l.s       fv0, .LFLOAT_TO_INT_ret_max
+    bc1t      .LOP_FLOAT_TO_INT_set_vreg_f
+
+    l.s       fa1, .LFLOAT_TO_INT_min
+    c.ole.s   fcc0, fa0, fa1                    #  arg <= minint?
+    l.s       fv0, .LFLOAT_TO_INT_ret_min
+    bc1t      .LOP_FLOAT_TO_INT_set_vreg_f
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1                    #  unordered with itself => NaN
+    li.s      fv0, 0
+    bc1t      .LOP_FLOAT_TO_INT_set_vreg_f
+
+    trunc.w.s fv0, fa0
+    b         .LOP_FLOAT_TO_INT_set_vreg_f
+#endif
+
+.LFLOAT_TO_INT_max:
+    .word     0x4f000000                        #  (float)0x7fffffff, rounded up
+.LFLOAT_TO_INT_min:
+    .word     0xcf000000                        #  (float)-0x80000000
+.LFLOAT_TO_INT_ret_max:
+    .word     0x7fffffff
+.LFLOAT_TO_INT_ret_min:
+    .word     0x80000000
+
+
+
+
+/* continuation for OP_FLOAT_TO_LONG */
+
+/*
+ * Out-of-range float→long conversion: clamp to maxlong/minlong,
+ * NaN converts to 0.  Result in rRESULT0/rRESULT1 (lo/hi).
+ */
+f2l_doconv:
+#ifdef SOFT_FLOAT
+    li        a1, 0x5f000000                    #  (float)maxlong
+    move      rBIX, a0
+    JAL(__gesf2)
+
+    move      t0, v0
+    li        rRESULT0, ~0                      #  maxlong low word
+    li        rRESULT1, ~0x80000000             #  maxlong high word (7fffffff)
+    bgez      t0, .LOP_FLOAT_TO_LONG_set_vreg
+
+    move      a0, rBIX
+    li        a1, 0xdf000000                    #  (float)minlong
+    JAL(__lesf2)
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000              #  minlong
+    blez      t0, .LOP_FLOAT_TO_LONG_set_vreg
+
+    move      a0, rBIX
+    move      a1, rBIX
+    JAL(__nesf2)                                #  NaN check: NaN != itself
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0                       #  NaN -> 0
+    bnez      t0, .LOP_FLOAT_TO_LONG_set_vreg
+
+    move      a0, rBIX
+    JAL(__fixsfdi)                              #  in-range: truncate
+
+#else
+    l.s       fa1, .LLONG_TO_max
+    c.ole.s   fcc0, fa1, fa0                    #  arg >= maxlong?
+    li        rRESULT0, ~0
+    li        rRESULT1, ~0x80000000
+    bc1t      .LOP_FLOAT_TO_LONG_set_vreg
+
+    l.s       fa1, .LLONG_TO_min
+    c.ole.s   fcc0, fa0, fa1                    #  arg <= minlong?
+    li        rRESULT0, 0
+    li        rRESULT1, 0x80000000
+    bc1t      .LOP_FLOAT_TO_LONG_set_vreg
+
+    mov.s     fa1, fa0
+    c.un.s    fcc0, fa0, fa1                    #  unordered with itself => NaN
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .LOP_FLOAT_TO_LONG_set_vreg
+
+    JAL(__fixsfdi)
+#endif
+
+    b         .LOP_FLOAT_TO_LONG_set_vreg
+
+.LLONG_TO_max:
+    .word     0x5f000000                        #  (float)maxlong
+
+.LLONG_TO_min:
+    .word     0xdf000000                        #  (float)minlong
+
+
+/* continuation for OP_DOUBLE_TO_INT */
+
+/*
+ * Out-of-range double→int conversion: clamp to maxint/minint,
+ * NaN converts to 0.
+ */
+d2i_doconv:
+#ifdef SOFT_FLOAT
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64(rARG2, rARG3, t0)
+    move      rBIX, rARG0                       #  save a0
+    move      rTEMP, rARG1                      #  and a1
+    JAL(__gedf2)                                #  is arg >= maxint?
+
+    move      t0, v0
+    li        v0, ~0x80000000                   #  return maxint (7fffffff)
+    bgez      t0, .LOP_DOUBLE_TO_INT_set_vreg   #  nonzero == yes
+
+    move      rARG0, rBIX                       #  recover arg
+    move      rARG1, rTEMP
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)                                #  is arg <= minint?
+
+    move      t0, v0
+    li        v0, 0x80000000                    #  return minint (80000000)
+    blez      t0, .LOP_DOUBLE_TO_INT_set_vreg   #  nonzero == yes
+
+    move      rARG0, rBIX                       #  recover arg
+    move      rARG1, rTEMP
+    move      rARG2, rBIX                       #  compare against self
+    move      rARG3, rTEMP
+    JAL(__nedf2)                                #  is arg == self?
+
+    move      t0, v0                            #  zero == no
+    li        v0, 0
+    bnez      t0, .LOP_DOUBLE_TO_INT_set_vreg   #  return zero for NaN
+
+    move      rARG0, rBIX                       #  recover arg
+    move      rARG1, rTEMP
+    JAL(__fixdfsi)                              #  convert double to int
+    b         .LOP_DOUBLE_TO_INT_set_vreg
+#else
+    la        t0, .LDOUBLE_TO_INT_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0                    #  arg >= maxint?
+    l.s       fv0, .LDOUBLE_TO_INT_maxret
+    bc1t      .LOP_DOUBLE_TO_INT_set_vreg_f
+
+    la        t0, .LDOUBLE_TO_INT_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1                    #  arg <= minint?
+    l.s       fv0, .LDOUBLE_TO_INT_minret
+    bc1t      .LOP_DOUBLE_TO_INT_set_vreg_f
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1                    #  unordered with itself => NaN
+    li.s      fv0, 0
+    bc1t      .LOP_DOUBLE_TO_INT_set_vreg_f
+
+    trunc.w.d fv0, fa0
+    b         .LOP_DOUBLE_TO_INT_set_vreg_f
+#endif
+
+
+.LDOUBLE_TO_INT_max:
+    .dword    0x41dfffffffc00000               #  maxint, as a double
+.LDOUBLE_TO_INT_min:
+    .dword    0xc1e0000000000000               #  minint, as a double
+.LDOUBLE_TO_INT_maxret:
+    .word     0x7fffffff
+.LDOUBLE_TO_INT_minret:
+    .word     0x80000000
+
+
+/* continuation for OP_DOUBLE_TO_LONG */
+
+/*
+ * Out-of-range double→long conversion: clamp to maxlong/minlong,
+ * NaN converts to 0.  Result in rRESULT0/rRESULT1 (lo/hi).
+ */
+d2l_doconv:
+#ifdef SOFT_FLOAT
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64(rARG2, rARG3, t0)
+    move      rBIX, rARG0                       #  save a0
+    move      rTEMP, rARG1                      #  and a1
+    JAL(__gedf2)                                #  is arg >= maxlong?
+
+    move      t1, v0
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bgez      t1, .LOP_DOUBLE_TO_LONG_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64(rARG2, rARG3, t0)
+    JAL(__ledf2)                                #  is arg <= minlong?
+
+    move      t1, v0
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    blez      t1, .LOP_DOUBLE_TO_LONG_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    move      rARG2, rBIX                       #  compare against self (NaN check)
+    move      rARG3, rTEMP
+    JAL(__nedf2)
+
+    move      t0, v0
+    li        rRESULT0, 0
+    li        rRESULT1, 0                       #  NaN -> 0
+    bnez      t0, .LOP_DOUBLE_TO_LONG_set_vreg
+
+    move      rARG0, rBIX
+    move      rARG1, rTEMP
+    JAL(__fixdfdi)                              #  in-range: truncate
+
+#else
+    la        t0, .LDOUBLE_TO_LONG_max
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa1, fa0                    #  arg >= maxlong?
+    la        t0, .LDOUBLE_TO_LONG_ret_max
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .LOP_DOUBLE_TO_LONG_set_vreg
+
+    la        t0, .LDOUBLE_TO_LONG_min
+    LOAD64_F(fa1, fa1f, t0)
+    c.ole.d   fcc0, fa0, fa1                    #  arg <= minlong?
+    la        t0, .LDOUBLE_TO_LONG_ret_min
+    LOAD64(rRESULT0, rRESULT1, t0)
+    bc1t      .LOP_DOUBLE_TO_LONG_set_vreg
+
+    mov.d     fa1, fa0
+    c.un.d    fcc0, fa0, fa1                    #  unordered with itself => NaN
+    li        rRESULT0, 0
+    li        rRESULT1, 0
+    bc1t      .LOP_DOUBLE_TO_LONG_set_vreg
+    JAL(__fixdfdi)
+#endif
+    b         .LOP_DOUBLE_TO_LONG_set_vreg
+
+
+.LDOUBLE_TO_LONG_max:
+    .dword    0x43e0000000000000               #  maxlong, as a double
+.LDOUBLE_TO_LONG_min:
+    .dword    0xc3e0000000000000               #  minlong, as a double
+.LDOUBLE_TO_LONG_ret_max:
+    .dword    0x7fffffffffffffff
+.LDOUBLE_TO_LONG_ret_min:
+    .dword    0x8000000000000000
+
+
+/* continuation for OP_MUL_LONG */
+
+    # Store the 64-bit product (computed by the main handler) into vAA.
+    # On entry: v0/v1 = result lo/hi, a0 = &fp[AA].
+.LOP_MUL_LONG_finish:
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    STORE64(v0, v1, a0)                         #  vAA::vAA+1 <- v0(low) :: v1(high)
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+
+
+/* continuation for OP_IGET_VOLATILE */
+
+    /*
+     * Same as the non-volatile IGET finish, but with an SMP memory
+     * barrier after the load (acquire semantics on SMP builds).
+     *
+     * Currently:
+     *  v0 holds resolved field
+     *  rOBJ holds object (caller saved)
+     */
+.LOP_IGET_VOLATILE_finish:
+    #BAL(common_squeak0)
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    addu      a3, a3, rOBJ                      #  form address
+    lw        a0, (a3)                          #  a0 <- obj.field (8/16/32 bits)
+    SMP_DMB                                     #  acquiring load
+    GET_OPA4(a2)                                #  a2 <- A+
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG(a0, a2)                            #  fp[A] <- a0
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IPUT_VOLATILE */
+
+    /*
+     * Volatile store: barrier before (release) and after the store.
+     *
+     * Currently:
+     *  a0 holds resolved field
+     *  rOBJ holds object
+     */
+.LOP_IPUT_VOLATILE_finish:
+    #BAL(common_squeak0)
+    GET_OPA4(a1)                                #  a1 <- A+
+    LOAD_base_offInstField_byteOffset(a3, a0)   #  a3 <- byte offset of field
+    GET_VREG(a0, a1)                            #  a0 <- fp[A]
+    # check object for null
+    beqz      rOBJ, common_errNullObject        #  object was null
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    addu      rOBJ, rOBJ, a3                    #  form address
+    SMP_DMB_ST                                  #  releasing store
+    sw        a0, (rOBJ)                        #  obj.field (8/16/32 bits) <- a0
+    SMP_DMB
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_SGET_VOLATILE */
+
+.LOP_SGET_VOLATILE_finish:
+    LOAD_base_offStaticField_value(a1, a0)      #  a1 <- field value
+    SMP_DMB                                     #  acquiring load
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SET_VREG_GOTO(a1, a2, t0)                   #  fp[AA] <- a1
+
+/* continuation for OP_SPUT_VOLATILE */
+
+.LOP_SPUT_VOLATILE_finish:
+    # field ptr in a0
+    GET_OPA(a2)                                 #  a2 <- AA
+    FETCH_ADVANCE_INST(2)                       #  advance rPC, load rINST
+    GET_VREG(a1, a2)                            #  a1 <- fp[AA]
+    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
+    SMP_DMB_ST                                  #  releasing store
+    sw        a1, offStaticField_value(a0)      #  field <- vAA
+    SMP_DMB
+    GOTO_OPCODE(t0)                             #  jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT_VOLATILE */
+
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_OBJECT_VOLATILE_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ SMP_DMB # acquiring load
+ GET_OPA4(a2) # a2 <- A+
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(a0, a2) # fp[A] <- a0
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE_VOLATILE */
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IGET_WIDE_VOLATILE_finish:
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ beqz rOBJ, common_errNullObject # object was null
+ GET_OPA4(a2) # a2 <- A+
+ addu rOBJ, rOBJ, a3 # form address
+ # generated-template flag: 1 = volatile, so the quasi-atomic 64-bit load
+ # path is taken; the .else arm is the non-volatile template text
+ .if 1
+ vLOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .else
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a3, rFP, a2) # a3 <- &fp[A]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # fp[A] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE_VOLATILE */
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_WIDE_VOLATILE_finish:
+ GET_OPA4(a2) # a2 <- A+
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ EAS2(a2, rFP, a2) # a2 <- &fp[A]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ LOAD64(a0, a1, a2) # a0/a1 <- fp[A]
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ # (opcode kept in callee-saved rBIX: the JAL below clobbers temporaries)
+ addu a2, rOBJ, a3 # form address
+ .if 1
+ JAL(dvmQuasiAtomicSwap64Sync) # stores r0/r1 into addr r2
+# STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .else
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+
+/* continuation for OP_SGET_WIDE_VOLATILE */
+
+.LOP_SGET_WIDE_VOLATILE_finish:
+ # a0 holds resolved StaticField pointer
+ GET_OPA(a1) # a1 <- AA
+ .if 1
+ vLOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .else
+ LOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .endif
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ EAS2(a1, rFP, a1) # a1 <- &fp[AA]
+ STORE64(a2, a3, a1) # vAA/vAA+1 <- a2/a3
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+
+/* continuation for OP_SPUT_WIDE_VOLATILE */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: BBBB field ref
+ * rOBJ: &fp[AA]
+ * rBIX: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in a2.
+ */
+.LOP_SPUT_WIDE_VOLATILE_resolve:
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ # success ?
+ move a0, v0 # a0 <- field ptr (arg for common_verifyField)
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ move a2, v0 # a2 <- field ptr, per this label's contract
+ b .LOP_SPUT_WIDE_VOLATILE_finish # resume
+
+/* continuation for OP_EXECUTE_INLINE */
+
+ /*
+ * Extract args, call function.
+ * a0 = #of args (0-4)
+ * rBIX = call index
+ *
+ * Other ideas:
+ * - Use a jump table from the main piece to jump directly into the
+ * AND/LW pairs. Costs a data load, saves a branch.
+ * - Have five separate pieces that do the loading, so we can work the
+ * interleave a little better. Increases code size.
+ */
+.LOP_EXECUTE_INLINE_continue:
+ FETCH(rINST, 2) # rINST <- FEDC (arg registers of the inline op)
+ beq a0, 0, 0f
+ beq a0, 1, 1f
+ beq a0, 2, 2f
+ beq a0, 3, 3f
+ beq a0, 4, 4f
+ JAL(common_abort) # too many arguments
+
+4:
+ and t0, rINST, 0xf000 # isolate F
+ ESRN(t1, rFP, t0, 10)
+ lw a3, 0(t1) # a3 <- vF (shift right 12, left 2)
+3:
+ and t0, rINST, 0x0f00 # isolate E
+ ESRN(t1, rFP, t0, 6)
+ lw a2, 0(t1) # a2 <- vE
+2:
+ and t0, rINST, 0x00f0 # isolate D
+ ESRN(t1, rFP, t0, 2)
+ lw a1, 0(t1) # a1 <- vD
+1:
+ and t0, rINST, 0x000f # isolate C
+ EASN(t1, rFP, t0, 2)
+ lw a0, 0(t1) # a0 <- vC
+0:
+ la rINST, gDvmInlineOpsTable # table of InlineOperation
+ EAS4(t1, rINST, rBIX) # t1 <- rINST + rBIX<<4
+ lw t9, 0(t1)
+ jr t9 # sizeof=16, "func" is first entry
+ # (not reached)
+
+ /*
+ * We're debugging or profiling.
+ * rBIX: opIndex
+ */
+.LOP_EXECUTE_INLINE_debugmode:
+ move a0, rBIX
+ JAL(dvmResolveInlineNative)
+ beqz v0, .LOP_EXECUTE_INLINE_resume # did it resolve? no, just move on
+ move rOBJ, v0 # remember method
+ move a0, v0
+ move a1, rSELF
+ JAL(dvmFastMethodTraceEnter) # (method, self)
+ addu a1, rSELF, offThread_retval # a1<- &self->retval
+ GET_OPB(a0) # a0 <- B
+ # Stack should have 16/20 available
+ sw a1, 16(sp) # push &self->retval
+ BAL(.LOP_EXECUTE_INLINE_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ move rINST, v0 # save result of inline
+ move a0, rOBJ # a0<- method
+ move a1, rSELF # a1<- self
+ JAL(dvmFastMethodTraceExit) # (method, self)
+ # BUGFIX: test the saved inline-op result (rINST), not v0 -- v0 was
+ # clobbered by the dvmFastMethodTraceExit call above. The _RANGE
+ # variant of this handler likewise tests its saved copy.
+ beqz rINST, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_EXECUTE_INLINE_RANGE */
+
+ /*
+ * Extract args, call function.
+ * a0 = #of args (0-4)
+ * rBIX = call index
+ * ra = return addr, above [DO NOT JAL out of here w/o preserving ra]
+ */
+.LOP_EXECUTE_INLINE_RANGE_continue:
+ FETCH(rOBJ, 2) # rOBJ <- CCCC
+ beq a0, 0, 0f
+ beq a0, 1, 1f
+ beq a0, 2, 2f
+ beq a0, 3, 3f
+ beq a0, 4, 4f
+ JAL(common_abort) # too many arguments
+
+4:
+ add t0, rOBJ, 3
+ GET_VREG(a3, t0)
+3:
+ add t0, rOBJ, 2
+ GET_VREG(a2, t0)
+2:
+ add t0, rOBJ, 1
+ GET_VREG(a1, t0)
+1:
+ GET_VREG(a0, rOBJ)
+0:
+ la rOBJ, gDvmInlineOpsTable # table of InlineOperation
+ EAS4(t1, rOBJ, rBIX) # t1 <- rOBJ + rBIX<<4 (entries are 16 bytes)
+ lw t9, 0(t1)
+ jr t9 # sizeof=16, "func" is first entry
+ # not reached
+
+ /*
+ * We're debugging or profiling.
+ * rBIX: opIndex
+ */
+.LOP_EXECUTE_INLINE_RANGE_debugmode:
+ move a0, rBIX
+ JAL(dvmResolveInlineNative)
+ beqz v0, .LOP_EXECUTE_INLINE_RANGE_resume # did it resolve? no, just move on
+ move rOBJ, v0 # remember method
+ move a0, v0
+ move a1, rSELF
+ JAL(dvmFastMethodTraceEnter) # (method, self)
+ addu a1, rSELF, offThread_retval # a1<- &self->retval
+ GET_OPA(a0) # a0 <- A
+ # Stack should have 16/20 available
+ sw a1, 16(sp) # push &self->retval
+ move rINST, rOBJ # rINST<- method
+ BAL(.LOP_EXECUTE_INLINE_RANGE_continue) # make call; will return after
+ lw gp, STACK_OFFSET_GP(sp) # restore gp
+ move rOBJ, v0 # save result of inline
+ move a0, rINST # a0<- method
+ move a1, rSELF # a1<- self
+ # NOTE(review): exit call is dvmFastNativeMethodTraceExit while the enter
+ # above is dvmFastMethodTraceEnter; the non-range handler pairs
+ # Enter with dvmFastMethodTraceExit -- confirm which pairing is intended
+ JAL(dvmFastNativeMethodTraceExit) # (method, self)
+ beqz rOBJ, common_exceptionThrown # returned false, handle exception
+ FETCH_ADVANCE_INST(3) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_INVOKE_OBJECT_INIT_RANGE */
+ /*
+ * A debugger is attached, so we need to go ahead and do
+ * this. For simplicity, we'll just jump directly to the
+ * corresponding handler. Note that we can't use
+ * rIBASE here because it may be in single-step mode.
+ * Load the primary table base directly.
+ */
+.LOP_INVOKE_OBJECT_INIT_RANGE_debugger:
+ lw a1, offThread_mainHandlerTable(rSELF)
+ # generated-template flag: 0 here selects the _RANGE (non-jumbo) opcode
+ .if 0
+ li t0, OP_INVOKE_DIRECT_JUMBO
+ .else
+ li t0, OP_INVOKE_DIRECT_RANGE
+ .endif
+ GOTO_OPCODE_BASE(a1, t0) # execute it
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE */
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_OBJECT_VOLATILE_finish:
+ #BAL(common_squeak0)
+ GET_OPA4(a1) # a1 <- A+
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ GET_VREG(a0, a1) # a0 <- fp[A]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu t2, rOBJ, a3 # form address
+ SMP_DMB_ST # releasing store
+ sw a0, (t2) # obj.field (32 bits) <- a0
+ SMP_DMB # barrier after store: full fence for volatile write
+ beqz a0, 1f # stored a null reference?
+ srl t1, rOBJ, GC_CARD_SHIFT
+ addu t2, a2, t1
+ sb a2, (t2) # mark card if not
+ # (the dirty value written is the low byte of the card-table base --
+ # standard Dalvik card-marking idiom; any nonzero byte marks the card)
+1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_SGET_OBJECT_VOLATILE */
+
+.LOP_SGET_OBJECT_VOLATILE_finish:
+ # a0 holds resolved StaticField pointer
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ SMP_DMB # acquiring load
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[AA] <- a1
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE */
+.LOP_SPUT_OBJECT_VOLATILE_finish: # field ptr in a0
+ GET_OPA(a2) # a2 <- AA
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[AA]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ lw t1, offField_clazz(a0) # t1 <- field->clazz
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SMP_DMB_ST # releasing store
+ sw a1, offStaticField_value(a0) # field <- vAA
+ SMP_DMB # barrier after store: full fence for volatile write
+ beqz a1, 1f # null stored -> no card mark needed
+ srl t2, t1, GC_CARD_SHIFT # card index from the owning class object
+ addu t3, a2, t2
+ sb a2, (t3) # dirty the card
+1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_CHECK_CAST_JUMBO */
+
+
+.LOP_CHECK_CAST_JUMBO_castfailure:
+ # A cast has failed. We need to throw a ClassCastException with the
+ # class of the object that failed to be cast.
+ EXPORT_PC() # about to throw
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0<- obj->clazz
+ move a1,rBIX # a1<- desired class
+ JAL(dvmThrowClassCastException)
+ b common_exceptionThrown
+
+ /*
+ * Advance PC and get next opcode
+ *
+ */
+.LOP_CHECK_CAST_JUMBO_okay:
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a2 holds AAAAAAAA
+ * rOBJ holds object
+ */
+.LOP_CHECK_CAST_JUMBO_resolve:
+ EXPORT_PC() # resolve() could throw
+ LOAD_rSELF_method(a3) # a3<- self->method
+ move a1, a2 # a1<- AAAAAAAA
+ li a2, 0 # a2<- false (don't init class)
+ LOAD_base_offMethod_clazz(a0, a3) # a0<- method->clazz
+ JAL(dvmResolveClass) # v0<- resolved ClassObject ptr
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ move a1, v0 # a1<- class resolved from AAAAAAAA
+ LOAD_base_offObject_clazz(a0, rOBJ) # a0<- obj->clazz
+ b .LOP_CHECK_CAST_JUMBO_resolved # pick up where we left off
+
+
+
+/* continuation for OP_INSTANCE_OF_JUMBO */
+
+ /*
+ * Class resolved, determine type of check necessary. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from AAAAAAAA
+ * rOBJ holds BBBB
+ */
+
+.LOP_INSTANCE_OF_JUMBO_resolved: # a0=obj->clazz, a1=resolved class
+ # same class (trivial success)?
+ beq a0, a1, .LOP_INSTANCE_OF_JUMBO_trivial # yes, trivial finish
+ # fall through to OP_INSTANCE_OF_JUMBO_fullcheck
+
+ /*
+ * Trivial test failed, need to perform full check. This is common.
+ * a0 holds obj->clazz
+ * a1 holds class resolved from AAAAAAAA
+ * rOBJ holds BBBB
+ */
+.LOP_INSTANCE_OF_JUMBO_fullcheck:
+ JAL(dvmInstanceofNonTrivial) # v0 <- boolean result
+ move a0, v0 # a0 <- boolean result for _store
+ b .LOP_INSTANCE_OF_JUMBO_store # go to OP_INSTANCE_OF_JUMBO_store
+
+.LOP_INSTANCE_OF_JUMBO_trivial:
+ li a0, 1 # indicate success
+ # fall thru
+ /*
+ * a0 holds boolean result
+ * rOBJ holds BBBB
+ */
+.LOP_INSTANCE_OF_JUMBO_store:
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, rOBJ) # vBBBB <- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a3 holds AAAAAAAA
+ * rOBJ holds BBBB
+ */
+.LOP_INSTANCE_OF_JUMBO_resolve:
+ EXPORT_PC() # resolve() could throw
+ LOAD_rSELF_method(a0) # a0 <- self->method
+ move a1, a3 # a1 <- AAAAAAAA
+ li a2, 1 # a2 <- true (fromUnverifiedConstant)
+ LOAD_base_offMethod_clazz(a0, a0) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- resolved ClassObject ptr
+ move a1, v0 # a1 <- class resolved from AAAAAAAA
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ # BUGFIX: fetch CCCC into a3 (was FETCH(ra, 4), which clobbered ra and
+ # left a3 uninitialized for the GET_VREG below), and drop the stray
+ # "move a1, a0" that overwrote the just-resolved class with method->clazz.
+ FETCH(a3, 4) # a3 <- CCCC
+ GET_VREG(a0, a3) # a0 <- vCCCC (object)
+ LOAD_base_offObject_clazz(a0, a0) # a0 <- obj->clazz
+ b .LOP_INSTANCE_OF_JUMBO_resolved # pick up where we left off
+
+
+/* continuation for OP_NEW_INSTANCE_JUMBO */
+
+.LOP_NEW_INSTANCE_JUMBO_continue:
+ # v0 holds the new object, a3 holds BBBB (target register index)
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(v0, a3) # vBBBB <- v0
+ GOTO_OPCODE(t0) # jump to next instruction
+
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we need to stop the trace building early.
+ * v0: new object
+ * a3: vAA
+ */
+.LOP_NEW_INSTANCE_JUMBO_jitCheck:
+ lw a1, 0(rBIX) # reload resolved class
+ # okay?
+ bnez a1, .LOP_NEW_INSTANCE_JUMBO_continue # yes, finish
+ move rOBJ, v0 # preserve new object
+ move rBIX, a3 # preserve vAA
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) # (self, pc)
+ FETCH_ADVANCE_INST(2) # advance rPC, load rINST
+ # NOTE(review): advances by 2 on this path vs 4 on the normal path --
+ # confirm against the jumbo instruction width at the call site
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(rOBJ, rBIX) # vAA <- new object
+ GOTO_OPCODE(t0) # jump to next instruction
+#endif
+
+ /*
+ * Class initialization required.
+ *
+ * a0 holds class object
+ */
+.LOP_NEW_INSTANCE_JUMBO_needinit:
+ JAL(dvmInitClass) # initialize class
+ move a0, rOBJ # restore a0
+ # check boolean result
+ bnez v0, .LOP_NEW_INSTANCE_JUMBO_initialized # success, continue
+ b common_exceptionThrown # failed, deal with init exception
+
+
+ /*
+ * Resolution required. This is the least-likely path.
+ *
+ * a1 holds AAAAAAAA
+ */
+.LOP_NEW_INSTANCE_JUMBO_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ li a2, 0 # a2 <- false (don't init class here)
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- resolved ClassObject ptr
+ move a0, v0 # a0 <- class, for the _resolved path
+ # got null?
+ bnez v0, .LOP_NEW_INSTANCE_JUMBO_resolved # no, continue
+ b common_exceptionThrown # yes, handle exception
+
+/* continuation for OP_NEW_ARRAY_JUMBO */
+
+ /*
+ * Finish allocation.
+ *
+ * a0 holds class
+ * a1 holds array length
+ */
+.LOP_NEW_ARRAY_JUMBO_finish:
+ li a2, ALLOC_DONT_TRACK # don't track in local refs table
+ JAL(dvmAllocArrayByClass) # v0 <- call(clazz, length, flags)
+ FETCH(a2, 3) # a2<- vBBBB (target register index)
+ # failed?
+ beqz v0, common_exceptionThrown # yes, handle the exception
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG(v0, a2) # vBBBB <- v0
+ GOTO_OPCODE(t0) # jump to next instruction
+#%break
+ # (the "#%break" above is a leftover mterp template directive; in the
+ # generated file it is just a comment)
+
+
+
+ /*
+ * Resolve class. (This is an uncommon case.)
+ *
+ * a1 holds array length
+ * a2 holds class ref AAAAAAAA
+ */
+.LOP_NEW_ARRAY_JUMBO_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ move rOBJ, a1 # rOBJ <- length (save)
+ move a1, a2 # a1 <- AAAAAAAA
+ li a2, 0 # a2 <- false (don't init class)
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ JAL(dvmResolveClass) # v0 <- call(clazz, ref)
+ move a1, rOBJ # a1 <- length (restore)
+ # got null?
+ beqz v0, common_exceptionThrown # yes, handle exception
+ move a0, v0
+ b .LOP_NEW_ARRAY_JUMBO_finish # continue with to OP_NEW_ARRAY_JUMBO_finish
+
+
+
+/* continuation for OP_FILLED_NEW_ARRAY_JUMBO */
+
+ /*
+ * On entry:
+ * a0 holds array class
+ * rOBJ holds AA or BA
+ */
+.LOP_FILLED_NEW_ARRAY_JUMBO_continue:
+ LOAD_base_offClassObject_descriptor(a3, a0) # a3 <- arrayClass->descriptor
+ li a2, ALLOC_DONT_TRACK # a2 <- alloc flags
+ lbu rINST, 1(a3) # rINST <- descriptor[1] (element type char)
+ FETCH(a1, 3) # a1<- BBBB (length)
+ # accept only element types 'I' (int), 'L' (object), '[' (array):
+ seq t0, rINST, 'I' # array of ints?
+ seq t1, rINST, 'L' # array of objects?
+ or t0, t1
+ seq t1, rINST, '[' # array of arrays?
+ or t0, t1
+ move rBIX, a1 # save length in rBIX
+ beqz t0, .LOP_FILLED_NEW_ARRAY_JUMBO_notimpl # no, not handled yet
+ JAL(dvmAllocArrayByClass) # v0 <- call(arClass, length, flags)
+ # null return?
+ beqz v0, common_exceptionThrown # alloc failed, handle exception
+
+ FETCH(a1, 4) # a1 CCCC (first source register)
+ sw v0, offThread_retval(rSELF) # retval.l <- new array
+ sw rINST, (offThread_retval+4)(rSELF) # retval.h <- type
+ addu a0, v0, offArrayObject_contents # a0 <- newArray->contents
+ subu rBIX, rBIX, 1 # length--, check for neg
+ FETCH_ADVANCE_INST(5) # advance to next instr, load rINST
+ bltz rBIX, 2f # was zero, bail
+
+ # copy values from registers into the array
+ # a0=array, a1=CCCC, t0=BBBB(length)
+ move t0, rBIX
+ EAS2(a2, rFP, a1) # a2 <- &fp[CCCC]
+1:
+ lw a3, 0(a2) # a3 <- *a2++
+ addu a2, 4
+ subu t0, t0, 1 # count--
+ sw a3, (a0) # *contents++ = vX
+ addu a0, 4
+ bgez t0, 1b
+
+2:
+ lw a0, offThread_retval(rSELF) # a0 <- object
+ lw a1, (offThread_retval+4)(rSELF) # a1 <- type
+ seq t1, a1, 'I' # Is int array?
+ bnez t1, 3f # int arrays hold no refs: skip card mark
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ srl t3, a0, GC_CARD_SHIFT
+ addu t2, a2, t3
+ sb a2, (t2) # dirty the card for the new ref array
+3:
+ GET_INST_OPCODE(t0) # ip <- opcode from rINST
+ GOTO_OPCODE(t0) # execute it
+
+
+ /*
+ * Throw an exception indicating that we have not implemented this
+ * mode of filled-new-array.
+ */
+.LOP_FILLED_NEW_ARRAY_JUMBO_notimpl:
+ la a0, .LstrFilledNewArrayNotImpl
+ JAL(dvmThrowInternalError)
+ b common_exceptionThrown
+
+/* continuation for OP_IGET_JUMBO */
+
+.LOP_IGET_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ # noop # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE_JUMBO */
+
+.LOP_IGET_WIDE_JUMBO_resolved:
+ # test return code
+ move a0, v0
+ bnez v0, .LOP_IGET_WIDE_JUMBO_finish
+ b common_exceptionThrown
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IGET_WIDE_JUMBO_finish:
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ beqz rOBJ, common_errNullObject # object was null
+ GET_OPA4(a2) # a2 <- A+
+ addu rOBJ, rOBJ, a3 # form address
+ # generated-template flag: 0 = non-volatile, plain 64-bit load
+ .if 0
+ vLOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .else
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .endif
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ EAS2(a3, rFP, a2) # a3 <- &fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # fp[BBBB] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT_JUMBO */
+
+# NOTE(review): the five sub-word/object iget-jumbo continuations below are
+# identical template expansions; all widths use lw because instance fields
+# occupy full 32-bit slots -- confirm against the object field layout.
+
+.LOP_IGET_OBJECT_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_OBJECT_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ # noop # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_BOOLEAN_JUMBO */
+
+.LOP_IGET_BOOLEAN_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_BOOLEAN_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ # noop # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_BYTE_JUMBO */
+
+.LOP_IGET_BYTE_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_BYTE_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ # noop # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_CHAR_JUMBO */
+
+.LOP_IGET_CHAR_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_CHAR_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ # noop # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_SHORT_JUMBO */
+
+.LOP_IGET_SHORT_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_SHORT_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ # noop # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_JUMBO */
+
+.LOP_IPUT_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_JUMBO_finish
+
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ # noop # releasing store
+ sw a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ # noop
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE_JUMBO */
+
+.LOP_IPUT_WIDE_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_WIDE_JUMBO_finish
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_WIDE_JUMBO_finish:
+ FETCH(a2, 3) # a2<- BBBB
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ EAS2(a2, rFP, a2) # a2 <- &fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ LOAD64(a0, a1, a2) # a0/a1 <- fp[BBBB]
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ addu a2, rOBJ, a3 # form address
+ # generated-template flag: 0 = non-volatile, plain 64-bit store
+ .if 0
+ JAL(dvmQuasiAtomicSwap64Sync) # stores r0/r1 into addr r2
+# STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .else
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+
+
+/* continuation for OP_IPUT_OBJECT_JUMBO */
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_OBJECT_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_OBJECT_JUMBO_finish
+
+.LOP_IPUT_OBJECT_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu t2, rOBJ, a3 # form address
+ # noop # releasing store
+ sw a0, (t2) # obj.field (32 bits) <- a0
+ # noop
+ beqz a0, 1f # stored a null reference?
+ srl t1, rOBJ, GC_CARD_SHIFT
+ addu t2, a2, t1
+ sb a2, (t2) # mark card if not
+1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_BOOLEAN_JUMBO */
+
+# NOTE(review): the four sub-word iput-jumbo continuations below are identical
+# template expansions; all widths use sw because instance fields occupy full
+# 32-bit slots -- confirm against the object field layout.
+
+.LOP_IPUT_BOOLEAN_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_BOOLEAN_JUMBO_finish
+
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_BOOLEAN_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ # noop # releasing store
+ sw a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ # noop
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_BYTE_JUMBO */
+
+.LOP_IPUT_BYTE_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_BYTE_JUMBO_finish
+
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_BYTE_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ # noop # releasing store
+ sw a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ # noop
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_CHAR_JUMBO */
+
+.LOP_IPUT_CHAR_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_CHAR_JUMBO_finish
+
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_CHAR_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ # noop # releasing store
+ sw a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ # noop
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_SHORT_JUMBO */
+
+.LOP_IPUT_SHORT_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_SHORT_JUMBO_finish
+
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_SHORT_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ # noop # releasing store
+ sw a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ # noop
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_SGET_JUMBO */
+
+.LOP_SGET_JUMBO_finish:
+ # a0 holds resolved StaticField pointer
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ # no-op # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SGET_WIDE_JUMBO */
+
+.LOP_SGET_WIDE_JUMBO_finish:
+ # a0 holds resolved StaticField pointer
+ FETCH(a1, 3) # a1<- BBBB
+ # generated-template flag: 0 = non-volatile, plain 64-bit load
+ .if 0
+ vLOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .else
+ LOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .endif
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ EAS2(a1, rFP, a1) # a1 <- &fp[BBBB]
+ STORE64(a2, a3, a1) # vBBBB/vBBBB+1 <- a2/a3
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SGET_OBJECT_JUMBO */
+
+.LOP_SGET_OBJECT_JUMBO_finish:
+ # a0 holds resolved StaticField pointer
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ # no-op # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SGET_BOOLEAN_JUMBO */
+
+# NOTE(review): the four sub-word sget-jumbo continuations below are identical
+# template expansions; static field values occupy full 32-bit slots.
+
+.LOP_SGET_BOOLEAN_JUMBO_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ # no-op # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SGET_BYTE_JUMBO */
+
+.LOP_SGET_BYTE_JUMBO_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ # no-op # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SGET_CHAR_JUMBO */
+
+.LOP_SGET_CHAR_JUMBO_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ # no-op # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SGET_SHORT_JUMBO */
+
+.LOP_SGET_SHORT_JUMBO_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ # no-op # acquiring load
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SPUT_JUMBO */
+
+.LOP_SPUT_JUMBO_finish:
+ # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ # no-op # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ # no-op
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SPUT_WIDE_JUMBO */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rOBJ: &fp[BBBB]
+ * rBIX: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in a2.
+ */
+.LOP_SPUT_WIDE_JUMBO_resolve:
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ # success ?
+ move a0, v0
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ move a2, v0
+ b .LOP_SPUT_WIDE_JUMBO_finish # resume
+
+/* continuation for OP_SPUT_OBJECT_JUMBO */
+.LOP_SPUT_OBJECT_JUMBO_finish: # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ lw t1, offField_clazz(a0) # t1 <- field->clazz
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ # no-op # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ # no-op
+ beqz a1, 1f
+ srl t2, t1, GC_CARD_SHIFT
+ addu t3, a2, t2
+ sb a2, (t3)
+ 1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SPUT_BOOLEAN_JUMBO */
+
+.LOP_SPUT_BOOLEAN_JUMBO_finish:
+ # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ # no-op # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ # no-op
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SPUT_BYTE_JUMBO */
+
+.LOP_SPUT_BYTE_JUMBO_finish:
+ # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ # no-op # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ # no-op
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SPUT_CHAR_JUMBO */
+
+.LOP_SPUT_CHAR_JUMBO_finish:
+ # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ # no-op # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ # no-op
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SPUT_SHORT_JUMBO */
+
+.LOP_SPUT_SHORT_JUMBO_finish:
+ # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ # no-op # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ # no-op
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_INVOKE_VIRTUAL_JUMBO */
+
+ /*
+ * At this point:
+ * a0 = resolved base method
+ * rBIX= C or CCCC (index of first arg, which is the "this" ptr)
+ */
+.LOP_INVOKE_VIRTUAL_JUMBO_continue:
+ FETCH(rBIX,4) # rBIX <- CCCC
+ GET_VREG(rOBJ, rBIX) # rOBJ <- "this" ptr
+ LOADu2_offMethod_methodIndex(a2, a0) # a2 <- baseMethod->methodIndex
+ # is "this" null?
+ beqz rOBJ, common_errNullObject # null "this", throw exception
+ LOAD_base_offObject_clazz(a3, rOBJ) # a3 <- thisPtr->clazz
+ LOAD_base_offClassObject_vtable(a3, a3) # a3 <- thisPtr->clazz->vtable
+ LOAD_eas2(a0, a3, a2) # a0 <- vtable[methodIndex]
+ b common_invokeMethodJumbo # (a0=method, rOBJ="this")
+
+
+/* continuation for OP_INVOKE_SUPER_JUMBO */
+
+ /*
+ * At this point:
+ * a0 = resolved base method
+ * rBIX = method->clazz
+ */
+.LOP_INVOKE_SUPER_JUMBO_continue:
+ LOAD_base_offClassObject_super(a1, rBIX) # a1 <- method->clazz->super
+ LOADu2_offMethod_methodIndex(a2, a0) # a2 <- baseMethod->methodIndex
+ LOAD_base_offClassObject_vtableCount(a3, a1) # a3 <- super->vtableCount
+ EXPORT_PC() # must export for invoke
+ # compare (methodIndex, vtableCount)
+ bgeu a2, a3, .LOP_INVOKE_SUPER_JUMBO_nsm # method not present in superclass
+ LOAD_base_offClassObject_vtable(a1, a1) # a1 <- ...clazz->super->vtable
+ LOAD_eas2(a0, a1, a2) # a0 <- vtable[methodIndex]
+ b common_invokeMethodJumbo # a0=method rOBJ="this"
+
+ /*
+ * Throw a NoSuchMethodError with the method name as the message.
+ * a0 = resolved base method
+ */
+.LOP_INVOKE_SUPER_JUMBO_nsm:
+ LOAD_base_offMethod_name(a1, a0) # a1 <- method name
+ b common_errNoSuchMethod
+
+
+/* continuation for OP_INVOKE_STATIC_JUMBO */
+
+.LOP_INVOKE_STATIC_JUMBO_resolve:
+ LOAD_rSELF_method(a3) # a3 <- self->method
+ LOAD_base_offMethod_clazz(a0, a3) # a0 <- method->clazz
+ li a2, METHOD_STATIC # resolver method type
+ JAL(dvmResolveMethod) # v0 <- call(clazz, ref, flags)
+ move a0, v0
+#if defined(WITH_JIT)
+ /*
+ * Check to see if we're actively building a trace. If so,
+ * we need to keep this instruction out of it.
+ * rBIX: &resolved_methodToCall
+ */
+ lhu a2, offThread_subMode(rSELF)
+ beqz v0, common_exceptionThrown # null, handle exception
+ and a2, kSubModeJitTraceBuild # trace under construction?
+ beqz a2, common_invokeMethodJumboNoThis # no, (a0=method, rOBJ="this")
+ lw a1, 0(rBIX) # reload resolved method
+ # finished resolving?
+ bnez a1, common_invokeMethodJumboNoThis # yes, (a0=method, rOBJ="this")
+ move rBIX, a0 # preserve method
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) # (self, pc)
+ move a0, rBIX
+ b common_invokeMethodJumboNoThis # whew, finally!
+#else
+ # got null?
+ bnez v0, common_invokeMethodJumboNoThis # (a0=method, rOBJ="this")
+ b common_exceptionThrown # yes, handle exception
+#endif
+
+/* continuation for OP_INVOKE_OBJECT_INIT_JUMBO */
+ /*
+ * A debugger is attached, so we need to go ahead and do
+ * this. For simplicity, we'll just jump directly to the
+ * corresponding handler. Note that we can't use
+ * rIBASE here because it may be in single-step mode.
+ * Load the primary table base directly.
+ */
+.LOP_INVOKE_OBJECT_INIT_JUMBO_debugger:
+ lw a1, offThread_mainHandlerTable(rSELF)
+ # generation-time switch: ".if 1" selects the jumbo variant
+ .if 1
+ li t0, OP_INVOKE_DIRECT_JUMBO
+ .else
+ li t0, OP_INVOKE_DIRECT_RANGE
+ .endif
+ GOTO_OPCODE_BASE(a1, t0) # execute it
+
+/* continuation for OP_IGET_VOLATILE_JUMBO */
+
+.LOP_IGET_VOLATILE_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_VOLATILE_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ SMP_DMB # acquiring load
+ FETCH(a2, 3) # a2 <- BBBB (dest vreg)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_WIDE_VOLATILE_JUMBO */
+
+.LOP_IGET_WIDE_VOLATILE_JUMBO_resolved:
+ # test return code
+ move a0, v0
+ bnez v0, .LOP_IGET_WIDE_VOLATILE_JUMBO_finish
+ b common_exceptionThrown
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IGET_WIDE_VOLATILE_JUMBO_finish:
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ beqz rOBJ, common_errNullObject # object was null
+ GET_OPA4(a2) # a2 <- A+ (NOTE(review): dead -- a2 is overwritten by FETCH below; looks like a non-jumbo leftover, confirm)
+ addu rOBJ, rOBJ, a3 # form address
+ .if 1
+ vLOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .else
+ LOAD64(a0, a1, rOBJ) # a0/a1 <- obj.field (64-bit align ok)
+ .endif
+ FETCH(a2, 3) # a2 <- BBBB (dest vreg)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ EAS2(a3, rFP, a2) # a3 <- &fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ STORE64(a0, a1, a3) # fp[BBBB] <- a0/a1
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IGET_OBJECT_VOLATILE_JUMBO */
+
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_resolved:
+ # test results
+ move a0, v0
+ beqz a0,common_exceptionThrown
+ /*
+ * Currently:
+ * v0 holds resolved field
+ * rOBJ holds object (caller saved)
+ */
+.LOP_IGET_OBJECT_VOLATILE_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ addu a3, a3, rOBJ # form address
+ lw a0, (a3) # a0 <- obj.field (8/16/32 bits)
+ SMP_DMB # acquiring load
+ FETCH(a2, 3) # a2 <- BBBB (dest vreg)
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ SET_VREG(a0, a2) # fp[BBBB]<- a0
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_VOLATILE_JUMBO */
+
+.LOP_IPUT_VOLATILE_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_VOLATILE_JUMBO_finish
+
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_VOLATILE_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB (source vreg)
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu rOBJ, rOBJ, a3 # form address
+ SMP_DMB_ST # releasing store
+ sw a0, (rOBJ) # obj.field (8/16/32 bits) <- a0
+ SMP_DMB # barrier after volatile store
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_IPUT_WIDE_VOLATILE_JUMBO */
+
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_WIDE_VOLATILE_JUMBO_finish
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_WIDE_VOLATILE_JUMBO_finish:
+ FETCH(a2, 3) # a2 <- BBBB (source vreg)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ EAS2(a2, rFP, a2) # a2 <- &fp[BBBB]
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ LOAD64(a0, a1, a2) # a0/a1 <- fp[BBBB]
+ GET_INST_OPCODE(rBIX) # extract opcode from rINST
+ addu a2, rOBJ, a3 # form address
+ .if 1
+ JAL(dvmQuasiAtomicSwap64Sync) # stores a0/a1 into addr a2
+# STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .else
+ STORE64(a0, a1, a2) # obj.field (64 bits, aligned) <- a0 a1
+ .endif
+ GOTO_OPCODE(rBIX) # jump to next instruction
+
+
+
+/* continuation for OP_IPUT_OBJECT_VOLATILE_JUMBO */
+
+ /*
+ * Currently:
+ * a0 holds resolved field
+ * rOBJ holds object
+ */
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_resolved:
+ move a0, v0
+ beqz a0, common_exceptionThrown
+ # fall through to OP_IPUT_OBJECT_VOLATILE_JUMBO_finish
+
+.LOP_IPUT_OBJECT_VOLATILE_JUMBO_finish:
+ #BAL(common_squeak0)
+ LOAD_base_offInstField_byteOffset(a3, a0) # a3 <- byte offset of field
+ FETCH(a1, 3) # a1<- BBBB (source vreg)
+ GET_VREG(a0, a1) # a0 <- fp[BBBB]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ # check object for null
+ beqz rOBJ, common_errNullObject # object was null
+ FETCH_ADVANCE_INST(5) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ addu t2, rOBJ, a3 # form address
+ SMP_DMB_ST # releasing store
+ sw a0, (t2) # obj.field (32 bits) <- a0
+ SMP_DMB # barrier after volatile store
+ beqz a0, 1f # stored a null reference?
+ srl t1, rOBJ, GC_CARD_SHIFT # t1 <- card index for object
+ addu t2, a2, t1 # t2 <- card address
+ sb a2, (t2) # mark card if not
+1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
+
+/* continuation for OP_SGET_VOLATILE_JUMBO */
+
+.LOP_SGET_VOLATILE_JUMBO_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ SMP_DMB # acquiring load
+ FETCH(a2, 3) # a2 <- BBBB (dest vreg)
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SGET_WIDE_VOLATILE_JUMBO */
+
+.LOP_SGET_WIDE_VOLATILE_JUMBO_finish:
+ FETCH(a1, 3) # a1<- BBBB (dest vreg)
+ # generation-time switch: ".if 1" selects the volatile (vLOAD64_off) form
+ .if 1
+ vLOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .else
+ LOAD64_off(a2, a3, a0, offStaticField_value) # a2/a3 <- field value (aligned)
+ .endif
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ EAS2(a1, rFP, a1) # a1 <- &fp[BBBB]
+ STORE64(a2, a3, a1) # vBBBB/vBBBB+1 <- a2/a3
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SGET_OBJECT_VOLATILE_JUMBO */
+
+.LOP_SGET_OBJECT_VOLATILE_JUMBO_finish:
+ LOAD_base_offStaticField_value(a1, a0) # a1 <- field value
+ SMP_DMB # acquiring load
+ FETCH(a2, 3) # a2 <- BBBB (dest vreg)
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SET_VREG_GOTO(a1, a2, t0) # fp[BBBB] <- a1
+
+/* continuation for OP_SPUT_VOLATILE_JUMBO */
+
+.LOP_SPUT_VOLATILE_JUMBO_finish:
+ # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB (source vreg)
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SMP_DMB_ST # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ SMP_DMB # barrier after volatile store
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/* continuation for OP_SPUT_WIDE_VOLATILE_JUMBO */
+
+ /*
+ * Continuation if the field has not yet been resolved.
+ * a1: AAAAAAAA field ref
+ * rOBJ: &fp[BBBB]
+ * rBIX: dvmDex->pResFields
+ *
+ * Returns StaticField pointer in a2.
+ */
+.LOP_SPUT_WIDE_VOLATILE_JUMBO_resolve:
+ LOAD_rSELF_method(a2) # a2 <- current method
+#if defined(WITH_JIT)
+ EAS2(rBIX, rBIX, a1) # rBIX<- &dvmDex->pResFields[field]
+#endif
+ EXPORT_PC() # resolve() could throw, so export now
+ LOAD_base_offMethod_clazz(a0, a2) # a0 <- method->clazz
+ JAL(dvmResolveStaticField) # v0 <- resolved StaticField ptr
+ # success ?
+ move a0, v0
+ beqz v0, common_exceptionThrown # no, handle exception
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including this instruction.
+ */
+ JAL(common_verifyField)
+#endif
+ move a2, v0
+ b .LOP_SPUT_WIDE_VOLATILE_JUMBO_finish # resume
+
+/* continuation for OP_SPUT_OBJECT_VOLATILE_JUMBO */
+.LOP_SPUT_OBJECT_VOLATILE_JUMBO_finish: # field ptr in a0
+ FETCH(a2, 3) # a2<- BBBB (source vreg)
+ FETCH_ADVANCE_INST(4) # advance rPC, load rINST
+ GET_VREG(a1, a2) # a1 <- fp[BBBB]
+ lw a2, offThread_cardTable(rSELF) # a2 <- card table base
+ lw t1, offField_clazz(a0) # t1 <- field->clazz
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ SMP_DMB_ST # releasing store
+ sw a1, offStaticField_value(a0) # field <- vBBBB
+ SMP_DMB # barrier after volatile store
+ beqz a1, 1f # no card mark when storing null
+ srl t2, t1, GC_CARD_SHIFT # t2 <- card index for field's class
+ addu t3, a2, t2 # t3 <- card address
+ sb a2, (t3) # dirty card (stores low byte of base)
+ 1:
+ GOTO_OPCODE(t0) # jump to next instruction
+
+ .size dvmAsmSisterStart, .-dvmAsmSisterStart
+ .global dvmAsmSisterEnd
+dvmAsmSisterEnd:
+
+/* File: mips/footer.S */
+/*
+ * ===========================================================================
+ * Common subroutines and data
+ * ===========================================================================
+ */
+
+ .text
+ .align 2
+
+#if defined(WITH_JIT)
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * Self-verification builds: most JIT-to-interpreter stubs below load a
+ * kSVS* state code into a2 and enter the shadow-run epilogue
+ * (jitSVShadowRunEnd) rather than returning to the interpreter directly.
+ */
+
+/*
+ * "longjmp" to a translation after single-stepping. Before returning
+ * to translation, must save state for self-verification.
+ */
+ .global dvmJitResumeTranslation # (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+ move rSELF, a0 # restore self
+ move rPC, a1 # restore Dalvik pc
+ move rFP, a2 # restore Dalvik fp
+ lw rBIX, offThread_jitResumeNPC(rSELF)
+ sw zero, offThread_jitResumeNPC(rSELF) # reset resume address
+ lw sp, offThread_jitResumeNSP(rSELF) # cut back native stack
+ b jitSVShadowRunStart # resume as if cache hit
+ # expects resume addr in rBIX
+
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ li a2, kSVSPunt # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ move rPC, a0 # set up dalvik pc
+ EXPORT_PC()
+ sw ra, offThread_jitResumeNPC(rSELF)
+ sw a1, offThread_jitResumeDPC(rSELF)
+ li a2, kSVSSingleStep # a2 <- interpreter entry point
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+ move a0, rPC # pass our target PC
+ li a2, kSVSNoProfile # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ move a0, rPC # pass our target PC
+ li a2, kSVSTraceSelect # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ lw a0, 0(ra) # pass our target PC
+ li a2, kSVSTraceSelect # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpBackwardBranch
+dvmJitToInterpBackwardBranch:
+ lw a0, 0(ra) # pass our target PC
+ li a2, kSVSBackwardBranch # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ lw a0, 0(ra) # pass our target PC
+ li a2, kSVSNormal # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ move a0, rPC # pass our target PC
+ li a2, kSVSNoChain # a2 <- interpreter entry point
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ b jitSVShadowRunEnd # doesn't return
+#else /* WITH_SELF_VERIFICATION */
+
+
+/*
+ * "longjmp" to a translation after single-stepping.
+ */
+ .global dvmJitResumeTranslation # (Thread* self, u4* dFP)
+dvmJitResumeTranslation:
+ move rSELF, a0 # restore self
+ move rPC, a1 # restore Dalvik pc
+ move rFP, a2 # restore Dalvik fp
+ lw a0, offThread_jitResumeNPC(rSELF)
+ sw zero, offThread_jitResumeNPC(rSELF) # reset resume address
+ lw sp, offThread_jitResumeNSP(rSELF) # cut back native stack
+ jr a0 # resume translation
+
+
+/*
+ * Return from the translation cache to the interpreter when the compiler is
+ * having issues translating/executing a Dalvik instruction. We have to skip
+ * the code cache lookup otherwise it is possible to indefinitely bounce
+ * between the interpreter and the code cache if the instruction that fails
+ * to be compiled happens to be at a trace start.
+ */
+ .global dvmJitToInterpPunt
+dvmJitToInterpPunt:
+ lw gp, STACK_OFFSET_GP(sp) # restore gp saved by the translation
+ move rPC, a0
+#if defined(WITH_JIT_TUNING)
+ move a0, ra
+ JAL(dvmBumpPunt)
+#endif
+ EXPORT_PC()
+ sw zero, offThread_inJitCodeCache(rSELF) # Back to the interp land
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+/*
+ * Return to the interpreter to handle a single instruction.
+ * On entry:
+ * rPC <= Dalvik PC of instruction to interpret
+ * a1 <= Dalvik PC of resume instruction
+ * ra <= resume point in translation
+ */
+
+ .global dvmJitToInterpSingleStep
+dvmJitToInterpSingleStep:
+ lw gp, STACK_OFFSET_GP(sp) # restore gp saved by the translation
+ move rPC, a0 # set up dalvik pc
+ EXPORT_PC()
+ sw ra, offThread_jitResumeNPC(rSELF)
+ sw sp, offThread_jitResumeNSP(rSELF)
+ sw a1, offThread_jitResumeDPC(rSELF)
+ li a1, 1
+ sw a1, offThread_singleStepCount(rSELF) # just step once
+ move a0, rSELF
+ li a1, kSubModeCountedStep
+ JAL(dvmEnableSubMode) # (self, subMode)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used for callees.
+ */
+ .global dvmJitToInterpTraceSelectNoChain
+dvmJitToInterpTraceSelectNoChain:
+ lw gp, STACK_OFFSET_GP(sp) # restore gp saved by the translation
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNoChain)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ beqz a0, 2f # 0 means translation does not exist
+ jr a0
+
+/*
+ * Return from the translation cache and immediately request
+ * a translation for the exit target. Commonly used following
+ * invokes.
+ */
+ .global dvmJitToInterpTraceSelect
+dvmJitToInterpTraceSelect:
+ lw gp, STACK_OFFSET_GP(sp) # restore gp saved by the translation
+ lw rPC, (ra) # get our target PC
+ subu rINST, ra, 8 # save start of chain branch
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # @ (pc, self)
+ sw v0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ beqz v0, 2f # no translation for target
+ move a0, v0
+ move a1, rINST
+ JAL(dvmJitChain) # v0 <- dvmJitChain(codeAddr, chainAddr)
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ move a0, v0
+ beqz a0, toInterpreter # didn't chain - resume with interpreter
+
+ jr a0 # continue native execution
+
+/* No translation, so request one if profiling isn't disabled */
+2:
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ lw a0, offThread_pJitProfTable(rSELF)
+ FETCH_INST()
+ li t0, kJitTSelectRequestHot
+ movn a2, t0, a0 # ask for trace selection
+ bnez a0, common_selectTrace
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+
+/*
+ * Return from the translation cache to the interpreter.
+ * The chain cell branched here with ra pointing at a 32-bit word
+ * that holds the target rPC value (loaded via "lw rPC, (ra)" below);
+ * the chain branch itself starts 8 bytes before that word.
+ * (The original ARM comment described a thumb-mode BLX return; on
+ * MIPS the target PC is simply loaded through ra.)
+ *
+ * Recover the new target and then check to see if there is a
+ * translation available for it. If so, we do a translation chain and
+ * go back to native execution. Otherwise, it's back to the
+ * interpreter (after treating this entry as a potential
+ * trace start).
+ */
+ .global dvmJitToInterpNormal
+dvmJitToInterpNormal:
+ lw gp, STACK_OFFSET_GP(sp) # restore gp saved by the translation
+ lw rPC, (ra) # get our target PC
+ subu rINST, ra, 8 # save start of chain branch
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNormal)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # @ (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ beqz a0, toInterpreter # go if not, otherwise do chain
+ move a1, rINST
+ JAL(dvmJitChain) # v0 <- dvmJitChain(codeAddr, chainAddr)
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ move a0, v0
+ beqz a0, toInterpreter # didn't chain - resume with interpreter
+
+ jr a0 # continue native execution
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ * NOTE(review): unlike the other non-SV stubs this one does not restore
+ * gp from STACK_OFFSET_GP(sp) -- confirm that is intentional.
+ */
+ .global dvmJitToInterpNoChainNoProfile
+dvmJitToInterpNoChainNoProfile:
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNoChain)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ beqz a0, footer235 # no translation: back to interpreting
+
+ jr a0 # continue native execution if so
+footer235:
+ EXPORT_PC()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * Return from the translation cache to the interpreter to do method invocation.
+ * Check if translation exists for the callee, but don't chain to it.
+ */
+
+ .global dvmJitToInterpNoChain
+dvmJitToInterpNoChain:
+ lw gp, STACK_OFFSET_GP(sp) # restore gp saved by the translation
+#if defined(WITH_JIT_TUNING)
+ JAL(dvmBumpNoChain)
+#endif
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw a0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+ beqz a0, 1f # no translation: fall into interpreter
+ jr a0 # continue native execution if so
+1:
+#endif /* WITH_SELF_VERIFICATION */
+
+/*
+ * No translation, restore interpreter regs and start interpreting.
+ * rSELF & rFP were preserved in the translated code, and rPC has
+ * already been restored by the time we get here. We'll need to set
+ * up rIBASE & rINST, and load the JIT profile-table pointer into a0.
+ */
+
+toInterpreter:
+ EXPORT_PC()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ lw a0, offThread_pJitProfTable(rSELF)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # NOTE(review): redundant reload of rIBASE
+ # NOTE: intended fallthrough
+
+/*
+ * Similar to common_updateProfile, but tests for null pJitProfTable.
+ * a0 holds pJitProfTable, rINST is loaded, rPC is current and
+ * rIBASE has been recently refreshed.
+ */
+
+common_testUpdateProfile:
+
+ beqz a0, 4f # null profile table: just dispatch
+
+/*
+ * Common code to update potential trace start counter, and initiate
+ * a trace-build if appropriate.
+ * On entry here:
+ * a0 <= pJitProfTable (verified non-NULL)
+ * rPC <= Dalvik PC
+ * rINST <= next instruction
+ */
+common_updateProfile:
+ srl a3, rPC, 12 # cheap, but fast hash function
+ xor a3, a3, rPC
+ andi a3, a3, JIT_PROF_SIZE-1 # eliminate excess bits
+ addu t1, a0, a3
+ lbu a1, (t1) # get counter
+ GET_INST_OPCODE(t0)
+ subu a1, a1, 1 # decrement counter
+ sb a1, (t1) # and store it
+ beqz a1, 1f # counter hit zero: consider a trace start
+ GOTO_OPCODE(t0) # threshold not reached: dispatch next instruction
+1:
+ /* Looks good, reset the counter */
+ lw a1, offThread_jitThreshold(rSELF)
+ sb a1, (t1)
+ EXPORT_PC()
+ move a0, rPC
+ move a1, rSELF
+ JAL(dvmJitGetTraceAddrThread) # (pc, self)
+ move a0, v0
+ sw v0, offThread_inJitCodeCache(rSELF) # set the inJitCodeCache flag
+ move a1, rPC # arg1 of translation may need this
+ move ra, zero # in case target is HANDLER_INTERPRET
+
+#if !defined(WITH_SELF_VERIFICATION)
+ li t0, kJitTSelectRequest # ask for trace selection
+ movz a2, t0, a0 # a2 <- request code iff no translation
+ beqz a0, common_selectTrace
+ jr a0 # jump to the translation
+#else
+
+ bne a0, zero, skip_ask_for_trace_selection
+ li a2, kJitTSelectRequest # ask for trace selection
+ j common_selectTrace
+
+skip_ask_for_trace_selection:
+ /*
+ * At this point, we have a target translation. However, if
+ * that translation is actually the interpret-only pseudo-translation
+ * we want to treat it the same as no translation.
+ */
+ move rBIX, a0 # save target
+ jal dvmCompilerGetInterpretTemplate
+ # special case?
+ bne v0, rBIX, jitSVShadowRunStart # set up self verification shadow space
+ # Need to clear the inJitCodeCache flag
+ sw zero, offThread_inJitCodeCache(rSELF) # back to the interp land
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+ /* no return */
+#endif
+
+/*
+ * On entry:
+ * a2 is jit state (trace-selection request code).
+ */
+
+common_selectTrace:
+ lhu a0, offThread_subMode(rSELF)
+ andi a0, (kSubModeJitTraceBuild | kSubModeJitSV)
+ bnez a0, 3f # already doing JIT work, continue
+ sw a2, offThread_jitState(rSELF)
+ move a0, rSELF
+
+/*
+ * Call out to validate trace-building request. If successful,
+ * rIBASE will be swapped to send us into single-stepping trace
+ * building mode, so we need to refresh before we continue.
+ */
+
+ EXPORT_PC()
+ SAVE_PC_TO_SELF()
+ SAVE_FP_TO_SELF()
+ JAL(dvmJitCheckTraceRequest)
+3:
+ FETCH_INST()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+4:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0)
+ /* no return */
+#endif
+
+#if defined(WITH_SELF_VERIFICATION)
+
+/*
+ * Save PC and registers to shadow memory for self verification mode
+ * before jumping to native translation.
+ * On entry:
+ * rPC, rFP, rSELF: the values that they should contain
+ * rBIX: the address of the target translation.
+ */
+jitSVShadowRunStart:
+ move a0, rPC # a0 <- program counter
+ move a1, rFP # a1 <- frame pointer
+ move a2, rSELF # a2 <- InterpState pointer
+ move a3, rBIX # a3 <- target translation
+ jal dvmSelfVerificationSaveState # save registers to shadow space
+ lw rFP, offShadowSpace_shadowFP(v0) # rFP <- fp in shadow space
+ jr rBIX # jump to the translation
+
+/*
+ * Restore PC, registers, and interpState to original values
+ * before jumping back to the interpreter.
+ */
+jitSVShadowRunEnd:
+ move a1, rFP # pass ending fp
+ move a3, rSELF # pass self ptr for convenience
+ jal dvmSelfVerificationRestoreState # restore pc and fp values
+ LOAD_PC_FP_FROM_SELF() # restore pc, fp
+ lw a1, offShadowSpace_svState(a0) # get self verification state
+ beq a1, zero, 1f # check for punt condition
+
+ # Setup SV single-stepping
+ move a0, rSELF
+ li a1, kSubModeJitSV
+ JAL(dvmEnableSubMode) # (self, subMode)
+ li a2, kJitSelfVerification # ask for self verification
+ sw a2, offThread_jitState(rSELF)
+ # Intentional fallthrough
+
+1:
+ # exit to interpreter without check
+ EXPORT_PC()
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+#endif
+
+/*
+ * The equivalent of "goto bail", this calls through the "bail handler".
+ * It will end this interpreter activation, and return to the caller
+ * of dvmMterpStdRun.
+ *
+ * State registers will be saved to the "thread" area before bailing,
+ * for debugging purposes.
+ */
+ .ent common_gotoBail
+common_gotoBail:
+ SAVE_PC_FP_TO_SELF() # export state to "thread"
+ move a0, rSELF # a0 <- self ptr
+ b dvmMterpStdBail # call(self, changeInterp)
+ .end common_gotoBail
+
+/*
+ * The JIT's invoke method needs to remember the callsite class and
+ * target pair. Save them here so that they are available to
+ * dvmCheckJit following the interpretation of this invoke.
+ * NOTE(review): clobbers rOBJ ("this" is replaced by this->clazz).
+ */
+#if defined(WITH_JIT)
+save_callsiteinfo:
+ beqz rOBJ, 1f # null "this": record a null class
+ lw rOBJ, offObject_clazz(rOBJ) # rOBJ <- this->clazz
+1:
+ sw a0, offThread_methodToCall(rSELF)
+ sw rOBJ, offThread_callsiteClass(rSELF)
+ jr ra
+#endif
+
+/*
+ * Common code for jumbo method invocation.
+ * NOTE: this adjusts rPC to account for the difference in instruction width.
+ * As a result, the savedPc in the stack frame will not be wholly accurate. So
+ * long as that is only used for source file line number calculations, we're
+ * okay.
+ */
+common_invokeMethodJumboNoThis:
+#if defined(WITH_JIT)
+ /* On entry: a0 is "Method* methodToCall */
+ li rOBJ, 0 # clear "this"
+#endif
+common_invokeMethodJumbo:
+ /* On entry: a0 is "Method* methodToCall, rOBJ is "this" */
+.LinvokeNewJumbo:
+#if defined(WITH_JIT)
+ lhu a1, offThread_subMode(rSELF)
+ andi a1, kSubModeJitTraceBuild # trace under construction?
+ beqz a1, 1f
+ JAL(save_callsiteinfo)
+#endif
+/* prepare to copy args to "outs" area of current frame */
+1:
+ add rPC, rPC, 4 # adjust pc to make return consistent
+ FETCH(a2, 1) # a2 <- argument count
+ SAVEAREA_FROM_FP(rBIX, rFP) # rBIX <- stack save area
+ beqz a2, .LinvokeArgsDone # if no args, skip the rest
+ FETCH(a1, 2) # a1 <- CCCC
+ b .LinvokeRangeArgs # handle args like invoke range
+
+
+/*
+ * Common code for method invocation with range.
+ *
+ * On entry:
+ * a0 is "Method* methodToCall", the method we're trying to call
+ */
+common_invokeMethodRange:
+.LinvokeNewRange:
+#if defined(WITH_JIT)
+ lhu a1, offThread_subMode(rSELF)
+ andi a1, kSubModeJitTraceBuild # trace under construction?
+ beqz a1, 1f
+ JAL(save_callsiteinfo)
+#endif
+ # prepare to copy args to "outs" area of current frame
+1:
+ GET_OPA(a2) # a2 <- AA (argument count)
+ SAVEAREA_FROM_FP(rBIX, rFP) # rBIX <- stack save area
+ beqz a2, .LinvokeArgsDone
+ FETCH(a1, 2) # a1 <- CCCC
+.LinvokeRangeArgs:
+ # a0=methodToCall, a1=CCCC, a2=count, rBIX=outs
+ # (very few methods have > 10 args; could unroll for common cases)
+ EAS2(a3, rFP, a1) # a3 <- &fp[CCCC]
+ sll t0, a2, 2
+ subu rBIX, rBIX, t0 # rBIX <- bottom of outs area
+
+1:
+ lw a1, 0(a3) # copy one vreg into the outs area
+ addu a3, a3, 4
+ subu a2, a2, 1
+ sw a1, 0(rBIX)
+ addu rBIX, 4
+ bnez a2, 1b
+ b .LinvokeArgsDone
+
+/*
+ * Common code for method invocation without range.
+ *
+ * On entry:
+ * a0 is "Method* methodToCall", rOBJ is "this"
+ */
+common_invokeMethodNoRange:
+.LinvokeNewNoRange:
+#if defined(WITH_JIT)
+ lhu a1, offThread_subMode(rSELF) # a1 <- subMode flags
+ andi a1, kSubModeJitTraceBuild
+ beqz a1, 1f # not building a trace: skip
+ JAL(save_callsiteinfo) # record callsite info for dvmCheckJit
+#endif
+
+ # prepare to copy args to "outs" area of current frame
+1:
+ GET_OPB(a2) # a2 <- B (argument count, 0-5)
+ SAVEAREA_FROM_FP(rBIX, rFP) # rBIX <- stack save area
+ beqz a2, .LinvokeArgsDone
+ FETCH(a1, 2) # a1 <- GFED (source vreg nibbles)
+
+ # a0=methodToCall, a1=GFED, a2=count,
+.LinvokeNonRange:
+ # Duff's-device style: branch to the case matching "count", then fall
+ # through the cases below it, pushing one arg word each (top down).
+ beq a2, 0, 0f
+ beq a2, 1, 1f
+ beq a2, 2, 2f
+ beq a2, 3, 3f
+ beq a2, 4, 4f
+ beq a2, 5, 5f
+
+5:
+ and t0, rINST, 0x0f00 # 5th arg: reg nibble held in rINST (A field)
+ ESRN(t2, rFP, t0, 6) # t2 <- &fp[reg] (nibble pre-shifted by 8)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+4:
+ and t0, a1, 0xf000 # 4th arg: G nibble
+ ESRN(t2, rFP, t0, 10)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+3:
+ and t0, a1, 0x0f00 # 3rd arg: F nibble
+ ESRN(t2, rFP, t0, 6)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+2:
+ and t0, a1, 0x00f0 # 2nd arg: E nibble
+ ESRN(t2, rFP, t0, 2)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+1:
+ and t0, a1, 0x000f # 1st arg: D nibble
+ EASN(t2, rFP, t0, 2)
+ lw a3, (t2)
+ subu rBIX, 4
+ sw a3, 0(rBIX)
+
+0:
+ #fall through .LinvokeArgsDone
+
+
+.LinvokeArgsDone: # a0=methodToCall
+ lhu rOBJ, offMethod_registersSize(a0) # rOBJ <- regsSize
+ lhu a3, offMethod_outsSize(a0) # a3 <- outsSize
+ lw a2, offMethod_insns(a0) # a2 <- method->insns (new PC)
+ lw rINST, offMethod_clazz(a0) # rINST <- method->clazz
+ # find space for the new stack frame, check for overflow
+ SAVEAREA_FROM_FP(a1, rFP) # a1 <- stack save area
+ sll t0, rOBJ, 2 # t0 <- regsSize * 4
+ subu a1, a1, t0 # a1 <- newFp (old savearea - regsSize)
+ SAVEAREA_FROM_FP(rBIX, a1) # rBIX <- newSaveArea
+ lw rOBJ, offThread_interpStackEnd(rSELF) # rOBJ <- interpStackEnd
+ sll t2, a3, 2 # t2 <- outsSize * 4
+ subu t0, rBIX, t2 # t0 <- lowest addr the new frame needs
+ lhu ra, offThread_subMode(rSELF) # ra <- subMode flags
+ lw a3, offMethod_accessFlags(a0) # a3 <- methodToCall->accessFlags
+ bltu t0, rOBJ, .LstackOverflow # yes, this frame will overflow stack
+
+
+ # set up newSaveArea
+#ifdef EASY_GDB
+ SAVEAREA_FROM_FP(t0, rFP)
+ sw t0, offStackSaveArea_prevSave(rBIX)
+#endif
+ sw rFP, (offStackSaveArea_prevFrame)(rBIX)
+ sw rPC, (offStackSaveArea_savedPc)(rBIX)
+#if defined(WITH_JIT)
+ sw zero, (offStackSaveArea_returnAddr)(rBIX)
+#endif
+ sw a0, (offStackSaveArea_method)(rBIX)
+ # Profiling?
+ bnez ra, 2f # subMode active: report method entry first
+1:
+ and t2, a3, ACC_NATIVE
+ bnez t2, .LinvokeNative
+ lhu rOBJ, (a2) # rOBJ <- first 16-bit unit at new PC (prefetch)
+ lw a3, offClassObject_pDvmDex(rINST) # a3 <- newMethodClass->pDvmDex
+ move rPC, a2 # Publish new rPC
+ # Update state values for the new method
+ # a0=methodToCall, a1=newFp, a3=newMethodClass, rOBJ=newINST
+ sw a0, offThread_method(rSELF)
+ sw a3, offThread_methodClassDex(rSELF)
+ li a2, 1
+ sw a2, offThread_debugIsMethodEntry(rSELF) # flag "just entered method"
+
+#if defined(WITH_JIT)
+ lw a0, offThread_pJitProfTable(rSELF) # JIT profiling active?
+ move rFP, a1 # fp = newFp
+ GET_PREFETCHED_OPCODE(t0, rOBJ) # extract prefetched opcode from rOBJ
+ move rINST, rOBJ # publish new rINST
+ sw a1, offThread_curFrame(rSELF)
+ bnez a0, common_updateProfile
+ GOTO_OPCODE(t0)
+#else
+ move rFP, a1 # fp = newFp
+ GET_PREFETCHED_OPCODE(t0, rOBJ) # extract prefetched opcode from rOBJ
+ move rINST, rOBJ # publish new rINST
+ sw a1, offThread_curFrame(rSELF)
+ GOTO_OPCODE(t0)
+#endif
+
+2:
+ # Profiling - record method entry. a0: methodToCall
+ STACK_STORE(a0, 0) # preserve a0-a3 across the C call
+ STACK_STORE(a1, 4)
+ STACK_STORE(a2, 8)
+ STACK_STORE(a3, 12)
+ sw rPC, offThread_pc(rSELF) # update interpSave.pc
+ move a1, a0
+ move a0, rSELF
+ JAL(dvmReportInvoke) # (self, methodToCall)
+ STACK_LOAD(a3, 12) # restore a0-a3
+ STACK_LOAD(a2, 8)
+ STACK_LOAD(a1, 4)
+ STACK_LOAD(a0, 0)
+ b 1b # resume the normal invoke path
+.LinvokeNative:
+ # Prep for the native call
+ # a0=methodToCall, a1=newFp, rBIX=newSaveArea
+ lhu ra, offThread_subMode(rSELF) # ra <- subMode flags
+ lw t3, offThread_jniLocal_topCookie(rSELF) # t3 <- current JNI local ref cookie
+ sw a1, offThread_curFrame(rSELF)
+ sw t3, offStackSaveArea_localRefCookie(rBIX) # newFp->localRefCookie=top
+ move a2, a0 # a2 <- methodToCall
+ move a0, a1 # a0 <- newFp (native arg0)
+ addu a1, rSELF, offThread_retval # a1 <- &self->retval (native arg1)
+ move a3, rSELF # a3 <- self (native arg3)
+#ifdef ASSIST_DEBUGGER
+ /* insert fake function header to help gdb find the stack frame */
+ b .Lskip
+ .ent dalvik_mterp
+dalvik_mterp:
+ STACK_STORE_FULL()
+.Lskip:
+#endif
+ bnez ra, 11f # Any special SubModes active?
+ lw t9, offMethod_nativeFunc(a2) # t9 <- nativeFunc
+ jalr t9 # call the native method
+ lw gp, STACK_OFFSET_GP(sp) # reload gp saved in our frame
+7:
+ # native return; rBIX=newSaveArea
+ # equivalent to dvmPopJniLocals
+ lw a0, offStackSaveArea_localRefCookie(rBIX)
+ lw a1, offThread_exception(rSELF) # did the native code throw?
+ sw rFP, offThread_curFrame(rSELF) # pop the native frame
+ sw a0, offThread_jniLocal_topCookie(rSELF) # new top <- old top
+ bnez a1, common_exceptionThrown
+
+ FETCH_ADVANCE_INST(3) # step past the invoke (rPC pre-adjusted for jumbo)
+ GET_INST_OPCODE(t0)
+ GOTO_OPCODE(t0)
+11:
+ # a0=newFp, a1=&retval, a2=methodToCall, a3=self, ra=subModes
+ SCRATCH_STORE(a0, 0) # preserve native-call args across the report
+ SCRATCH_STORE(a1, 4)
+ SCRATCH_STORE(a2, 8)
+ SCRATCH_STORE(a3, 12)
+ move a0, a2 # a0 <- methodToCall
+ move a1, rSELF
+ move a2, rFP
+ JAL(dvmReportPreNativeInvoke) # (methodToCall, self, fp)
+ SCRATCH_LOAD(a3, 12) # restore a0-a3
+ SCRATCH_LOAD(a2, 8)
+ SCRATCH_LOAD(a1, 4)
+ SCRATCH_LOAD(a0, 0)
+
+ # Call the native method
+ lw t9, offMethod_nativeFunc(a2) # t9<-methodToCall->nativeFunc
+ jalr t9
+ lw gp, STACK_OFFSET_GP(sp) # reload gp saved in our frame
+
+ # Restore the pre-call arguments
+ SCRATCH_LOAD(a3, 12) # restore a0-a3
+ SCRATCH_LOAD(a2, 8)
+ SCRATCH_LOAD(a1, 4)
+ SCRATCH_LOAD(a0, 0)
+
+ # Finish up any post-invoke subMode requirements
+ move a0, a2 # a0 <- methodToCall
+ move a1, rSELF
+ move a2, rFP
+ JAL(dvmReportPostNativeInvoke) # (methodToCall, self, fp)
+ b 7b # resume the common native-return path
+
+
+.LstackOverflow: # a0=methodToCall
+ move a1, a0 # a1 <- methodToCall
+ move a0, rSELF # a0 <- self
+ JAL(dvmHandleStackOverflow) # dvmHandleStackOverflow(self, methodToCall)
+ b common_exceptionThrown # StackOverflowError is now pending
+#ifdef ASSIST_DEBUGGER
+ .end dalvik_mterp # close the fake frame opened at dalvik_mterp
+#endif
+
+ /*
+ * Common code for method invocation, calling through "glue code".
+ * (Stale comment inherited from the ARM port; register names below are
+ * the MIPS equivalents. No code currently follows this comment.)
+ *
+ * TODO: now that we have range and non-range invoke handlers, this
+ * needs to be split into two. Maybe just create entry points
+ * that set rOBJ and jump here?
+ *
+ * On entry:
+ * a0 is "Method* methodToCall", the method we're trying to call
+ * rOBJ is "bool methodCallRange", indicating if this is a /range variant
+ */
+
+/*
+ * Common code for handling a return instruction.
+ *
+ * This does not return.
+ */
+common_returnFromMethod:
+.LreturnNew:
+ lhu t0, offThread_subMode(rSELF) # t0 <- subMode flags
+ SAVEAREA_FROM_FP(a0, rFP) # a0 <- current save area
+ lw rOBJ, offStackSaveArea_savedPc(a0) # rOBJ = saveArea->savedPc
+ bnez t0, 19f # subMode active: report the return first
+14:
+ lw rFP, offStackSaveArea_prevFrame(a0) # fp = saveArea->prevFrame
+ lw a2, (offStackSaveArea_method - sizeofStackSaveArea)(rFP)
+ # a2<- method we're returning to
+ # is this a break frame?
+ beqz a2, common_gotoBail # break frame, bail out completely
+
+ lw rBIX, offMethod_clazz(a2) # rBIX<- method->clazz
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+ PREFETCH_ADVANCE_INST(rINST, rOBJ, 3) # advance rOBJ, update new rINST
+ sw a2, offThread_method(rSELF) # self->method = newSave->method
+ lw a1, offClassObject_pDvmDex(rBIX) # a1<- method->clazz->pDvmDex
+ sw rFP, offThread_curFrame(rSELF) # curFrame = fp
+#if defined(WITH_JIT)
+ lw rBIX, offStackSaveArea_returnAddr(a0) # rBIX <- chained JIT return addr
+ move rPC, rOBJ # publish new rPC
+ sw a1, offThread_methodClassDex(rSELF)
+ sw rBIX, offThread_inJitCodeCache(rSELF) # may return to JIT'ed land
+ beqz rBIX, 15f # no JIT return addr: resume interpreting
+ move t9, rBIX
+ jalr t9 # return directly into compiled code
+ lw gp, STACK_OFFSET_GP(sp) # reload gp saved in our frame
+15:
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+#else
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ move rPC, rOBJ # publish new rPC
+ sw a1, offThread_methodClassDex(rSELF)
+ GOTO_OPCODE(t0)
+#endif
+
+19:
+ # Handle special actions
+ # On entry, a0: StackSaveArea
+ lw a1, offStackSaveArea_prevFrame(a0) # a1<- prevFP
+ sw rPC, offThread_pc(rSELF) # update interpSave.pc
+ sw a1, offThread_curFrame(rSELF) # update interpSave.curFrame
+ move a0, rSELF
+ JAL(dvmReportReturn) # (self)
+ SAVEAREA_FROM_FP(a0, rFP) # restore StackSaveArea
+ b 14b # resume the normal return path
+
+ .if 0
+ /*
+ * Return handling, calls through "glue code".
+ */
+.LreturnOld:
+ SAVE_PC_FP_TO_SELF() # export state
+ move a0, rSELF # arg to function
+ JAL(dvmMterp_returnFromMethod)
+ b common_resumeAfterGlueCall
+ .endif
+
+/*
+ * Somebody has thrown an exception. Handle it.
+ *
+ * If the exception processing code returns to us (instead of falling
+ * out of the interpreter), continue with whatever the next instruction
+ * now happens to be.
+ *
+ * This does not return.
+ */
+ .global dvmMterpCommonExceptionThrown
+dvmMterpCommonExceptionThrown:
+common_exceptionThrown:
+.LexceptionNew:
+
+ EXPORT_PC()
+ move a0, rSELF
+ JAL(dvmCheckSuspendPending) # honor any pending suspend request
+ lw rOBJ, offThread_exception(rSELF) # rOBJ <- pending exception object
+ move a1, rSELF
+ move a0, rOBJ
+ JAL(dvmAddTrackedAlloc) # keep the exception alive while we work
+ lhu a2, offThread_subMode(rSELF)
+ sw zero, offThread_exception(rSELF) # clear self->exception
+
+ # Special subMode?
+ bnez a2, 7f # any special subMode handling needed?
+8:
+ /* set up args and a local for "&fp" */
+ sw rFP, 20(sp) # store rFP => tmp
+ addu t0, sp, 20 # compute &tmp
+ sw t0, STACK_OFFSET_ARG04(sp) # save it in arg4 as per ABI
+ li a3, 0 # a3 <- false ("scan only" flag off)
+ lw a1, offThread_method(rSELF)
+ move a0, rSELF
+ lw a1, offMethod_insns(a1) # a1 <- method->insns
+ # NOTE(review): ra loaded below appears unused before the JAL clobbers
+ # it - confirm whether this load is dead.
+ lhu ra, offThread_subMode(rSELF)
+ move a2, rOBJ # a2 <- exception object
+ subu a1, rPC, a1
+ sra a1, a1, 1 # a1 <- rPC as a code-unit offset
+
+ /* call, r0 gets catchRelPc (a code-unit offset) */
+ JAL(dvmFindCatchBlock) # call(self, relPc, exc, scan?, &fp)
+ lw rFP, 20(sp) # retrieve the updated rFP
+
+ /* update frame pointer and check result from dvmFindCatchBlock */
+ move a0, v0 # a0 <- catchRelPc (negative if uncaught)
+ bltz v0, .LnotCaughtLocally
+
+ /* fix earlier stack overflow if necessary; Preserve a0 */
+ lbu a1, offThread_stackOverflowed(rSELF)
+ beqz a1, 1f
+ move rBIX, a0 # preserve catchRelPc across the call
+ move a0, rSELF
+ move a1, rOBJ
+ JAL(dvmCleanupStackOverflow) # (self, exception)
+ move a0, rBIX
+
+1:
+
+/* adjust locals to match self->interpSave.curFrame and updated PC */
+ SAVEAREA_FROM_FP(a1, rFP) # a1<- new save area
+ lw a1, offStackSaveArea_method(a1) # a1 <- the catching method
+ sw a1, offThread_method(rSELF)
+ lw a2, offMethod_clazz(a1) # a2 <- method->clazz
+ lw a3, offMethod_insns(a1) # a3 <- method->insns
+ lw a2, offClassObject_pDvmDex(a2)
+ EAS1(rPC, a3, a0) # rPC <- insns + catchRelPc * 2
+ sw a2, offThread_methodClassDex(rSELF)
+
+ /* release the tracked alloc on the exception */
+ move a0, rOBJ
+ move a1, rSELF
+ JAL(dvmReleaseTrackedAlloc)
+
+ /* restore the exception if the handler wants it */
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh rIBASE
+ FETCH_INST()
+ GET_INST_OPCODE(t0)
+ bne t0, OP_MOVE_EXCEPTION, 2f # handler begins with move-exception?
+ sw rOBJ, offThread_exception(rSELF) # yes: re-publish the exception
+2:
+ GOTO_OPCODE(t0)
+
+ # Manage debugger bookkeeping
+7:
+ sw rPC, offThread_pc(rSELF) # update interpSave.pc
+ sw rFP, offThread_curFrame(rSELF)
+ move a0, rSELF
+ move a1, rOBJ
+ JAL(dvmReportExceptionThrow) # (self, exception)
+ b 8b # resume normal exception handling
+
+.LnotCaughtLocally: # rOBJ = exception
+ /* fix stack overflow if necessary */
+ lbu a1, offThread_stackOverflowed(rSELF)
+ beqz a1, 3f
+ move a0, rSELF
+ move a1, rOBJ
+ JAL(dvmCleanupStackOverflow) # dvmCleanupStackOverflow(self, exception)
+
+3:
+ # may want to show "not caught locally" debug messages here
+#if DVM_SHOW_EXCEPTION >= 2
+ /* call __android_log_print(prio, tag, format, ...) */
+ /* "Exception %s from %s:%d not caught locally" */
+ lw a0, offThread_method(rSELF)
+ lw a1, offMethod_insns(a0)
+ subu a1, rPC, a1
+ sra a1, a1, 1 # a1 <- rPC as a code-unit offset
+ JAL(dvmLineNumFromPC) # (method, relPc)
+ sw v0, 20(sp) # line number -> 6th printf arg slot
+ # dvmGetMethodSourceFile(method)
+ lw a0, offThread_method(rSELF)
+ JAL(dvmGetMethodSourceFile)
+ sw v0, 16(sp) # source file name -> 5th printf arg slot
+ # exception->clazz->descriptor
+ lw a3, offObject_clazz(rOBJ)
+ lw a3, offClassObject_descriptor(a3)
+ la a2, .LstrExceptionNotCaughtLocally
+ la a1, .LstrLogTag
+ li a0, 3 # log priority (3 == debug)
+ JAL(__android_log_print)
+#endif
+ sw rOBJ, offThread_exception(rSELF) # re-publish the pending exception
+ move a0, rOBJ
+ move a1, rSELF
+ JAL(dvmReleaseTrackedAlloc)
+ b common_gotoBail # unwind out of the interpreter
+
+ /*
+ * Exception handling, calls through "glue code".
+ */
+ .if 0
+.LexceptionOld:
+ SAVE_PC_TO_SELF() # export state
+ SAVE_FP_TO_SELF()
+ move a0, rSELF # arg to function
+ JAL(dvmMterp_exceptionThrown)
+ b common_resumeAfterGlueCall
+ .endif
+
+#if defined(WITH_JIT)
+ /*
+ * If the JIT is actively building a trace we need to make sure
+ * that the field is fully resolved before including the current
+ * instruction.
+ *
+ * On entry:
+ * rBIX: &dvmDex->pResFields[field]
+ * a0: field pointer (must preserve)
+ */
+common_verifyField:
+ lhu a3, offThread_subMode(rSELF)
+ andi a3, kSubModeJitTraceBuild
+ bnez a3, 1f # trace build active: go check resolution
+ jr ra # not building a trace: nothing to do
+1:
+ lw a1, (rBIX) # a1 <- resolved-field cache entry
+ beqz a1, 2f # not yet resolved: end the trace here
+ jr ra # resolved: safe to include this instruction
+2:
+ SCRATCH_STORE(a0, 0) # preserve a0-a3 and ra across the call
+ SCRATCH_STORE(a1, 4)
+ SCRATCH_STORE(a2, 8)
+ SCRATCH_STORE(a3, 12)
+ SCRATCH_STORE(ra, 16)
+ move a0, rSELF
+ move a1, rPC
+ JAL(dvmJitEndTraceSelect) #(self,pc) end trace before this inst)
+ SCRATCH_LOAD(a0, 0)
+ SCRATCH_LOAD(a1, 4)
+ SCRATCH_LOAD(a2, 8)
+ SCRATCH_LOAD(a3, 12)
+ SCRATCH_LOAD(ra, 16)
+ jr ra # return
+#endif
+
+/*
+ * After returning from a "glued" function, pull out the updated
+ * values and start executing at the next instruction.
+ */
+common_resumeAfterGlueCall:
+ LOAD_PC_FP_FROM_SELF() # pull rPC and rFP out of thread
+ lw rIBASE, offThread_curHandlerTable(rSELF) # refresh (may have changed in C code)
+ FETCH_INST() # load rINST from rPC
+ GET_INST_OPCODE(t0) # extract opcode from rINST
+ GOTO_OPCODE(t0) # jump to next instruction
+
+/*
+ * Invalid array index. Note that our calling convention is strange; we use a1
+ * and a3 because those just happen to be the registers all our callers are
+ * using. We move a3 before calling the C function, but a1 happens to match.
+ * a1: index
+ * a3: size
+ */
+common_errArrayIndex:
+ EXPORT_PC()
+ move a0, a3 # a0 <- length (a1 already holds index)
+ JAL(dvmThrowArrayIndexOutOfBoundsException) # (length, index)
+ b common_exceptionThrown # does not return
+
+/*
+ * Integer divide or mod by zero.
+ */
+common_errDivideByZero:
+ EXPORT_PC()
+ la a0, .LstrDivideByZero # a0 <- "divide by zero" message
+ JAL(dvmThrowArithmeticException) # (msg)
+ b common_exceptionThrown # does not return
+
+/*
+ * Attempt to allocate an array with a negative size.
+ * On entry: length in a1
+ */
+common_errNegativeArraySize:
+ EXPORT_PC()
+ move a0, a1 # arg0 <- len
+ JAL(dvmThrowNegativeArraySizeException) # (len)
+ b common_exceptionThrown # does not return
+
+/*
+ * Invocation of a non-existent method.
+ * On entry: method name in a1
+ */
+common_errNoSuchMethod:
+ EXPORT_PC()
+ move a0, a1 # arg0 <- method name
+ JAL(dvmThrowNoSuchMethodError) # (name)
+ b common_exceptionThrown # does not return
+
+/*
+ * We encountered a null object when we weren't expecting one. We
+ * export the PC, throw a NullPointerException, and goto the exception
+ * processing code.
+ */
+common_errNullObject:
+ EXPORT_PC()
+ li a0, 0 # no detail message
+ JAL(dvmThrowNullPointerException) # (msg)
+ b common_exceptionThrown # does not return
+
+/*
+ * For debugging, cause an immediate fault. The source address will be in ra. Use a jal to jump here.
+ */
+common_abort:
+ lw zero,-4(zero) # generate SIGSEGV (deliberate load from address -4)
+
+/*
+ * Spit out a "we were here", preserving all registers.
+ */
+ .macro SQUEAK num
+common_squeak\num:
+ STACK_STORE_RA(); # preserve ra across the printf
+ la a0, .LstrSqueak # a0 <- "<%d>" format string
+ LOAD_IMM(a1, \num); # a1 <- the squeak number
+ JAL(printf);
+ STACK_LOAD_RA();
+ RETURN;
+ .endm
+
+ # instantiate common_squeak0 .. common_squeak5
+ SQUEAK 0
+ SQUEAK 1
+ SQUEAK 2
+ SQUEAK 3
+ SQUEAK 4
+ SQUEAK 5
+
+/*
+ * Spit out the number in a0, preserving registers.
+ */
+common_printNum:
+ STACK_STORE_RA() # preserve ra across the printf
+ MOVE_REG(a1, a0) # a1 <- value (printf arg 1)
+ la a0, .LstrSqueak # a0 <- "<%d>" format string
+ JAL(printf)
+ STACK_LOAD_RA()
+ RETURN
+
+/*
+ * Print a newline, preserving registers.
+ */
+common_printNewline:
+ STACK_STORE_RA() # preserve ra across the printf
+ la a0, .LstrNewline # a0 <- "\n" string
+ JAL(printf)
+ STACK_LOAD_RA()
+ RETURN
+
+ /*
+ * Print the 32-bit quantity in a0 as a hex value, preserving registers.
+ * Uses the "<0x%x>" format string at .LstrPrintHex.
+ */
+common_printHex:
+ STACK_STORE_RA() # preserve ra across the printf
+ MOVE_REG(a1, a0) # a1 <- value (printf arg 1)
+ la a0, .LstrPrintHex # a0 <- format string
+ JAL(printf)
+ STACK_LOAD_RA()
+ RETURN # was flush-left "RETURN;"; indented to match sibling helpers
+
+/*
+ * Print the 64-bit quantity in a0-a1, preserving registers.
+ */
+common_printLong:
+ STACK_STORE_RA() # preserve ra across the printf
+ MOVE_REG(a3, a1) # a2/a3 <- 64-bit value (printf arg 1)
+ MOVE_REG(a2, a0)
+ la a0, .LstrPrintLong # a0 <- "<%lld>" format string
+ JAL(printf)
+ STACK_LOAD_RA()
+ RETURN;
+
+/*
+ * Print full method info. Pass the Method* in a0. Preserves regs.
+ */
+common_printMethod:
+ STACK_STORE_RA() # preserve ra across the call
+ JAL(dvmMterpPrintMethod) # (method) - a0 passed through
+ STACK_LOAD_RA()
+ RETURN
+
+/*
+ * Call a C helper function that dumps regs and possibly some
+ * additional info. Requires the C function to be compiled in.
+ */
+ .if 0 # disabled: helper is not always built
+common_dumpRegs:
+ STACK_STORE_RA()
+ JAL(dvmMterpDumpMipsRegs)
+ STACK_LOAD_RA()
+ RETURN
+ .endif
+
+/*
+ * Zero-terminated ASCII string data.
+ */
+ .data
+
+.LstrBadEntryPoint:
+ .asciiz "Bad entry point %d\n"
+.LstrDivideByZero:
+ .asciiz "divide by zero" # ArithmeticException detail message
+.LstrFilledNewArrayNotImpl:
+ .asciiz "filled-new-array only implemented for 'int'"
+.LstrLogTag:
+ .asciiz "mterp" # tag passed to __android_log_print
+.LstrExceptionNotCaughtLocally:
+ .asciiz "Exception %s from %s:%d not caught locally\n"
+
+.LstrNewline:
+ .asciiz "\n"
+.LstrSqueak:
+ .asciiz "<%d>" # used by SQUEAK / common_printNum
+.LstrPrintHex:
+ .asciiz "<0x%x>" # used by common_printHex
+.LstrPrintLong:
+ .asciiz "<%lld>" # used by common_printLong
+
+
+ .global dvmAsmAltInstructionStart
+ .type dvmAsmAltInstructionStart, %function
+ .text
+
+dvmAsmAltInstructionStart = .L_ALT_OP_NOP
+/*
+ * "ALT" handler table: one 128-byte-aligned stub per opcode.  Each stub
+ * refreshes rIBASE, gives dvmCheckBefore a chance to run when any
+ * breakFlags are set, then tail-jumps to the matching real handler at
+ * dvmAsmInstructionStart + (opcode * 128).
+ */
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NOP: /* 0x00 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (0 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE: /* 0x01 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (1 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_FROM16: /* 0x02 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (2 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_16: /* 0x03 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (3 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_WIDE: /* 0x04 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (4 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_WIDE_FROM16: /* 0x05 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (5 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_WIDE_16: /* 0x06 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (6 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_OBJECT: /* 0x07 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (7 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_OBJECT_FROM16: /* 0x08 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (8 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_OBJECT_16: /* 0x09 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (9 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_RESULT: /* 0x0a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (10 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_RESULT_WIDE: /* 0x0b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (11 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_RESULT_OBJECT: /* 0x0c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (12 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MOVE_EXCEPTION: /* 0x0d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (13 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_RETURN_VOID: /* 0x0e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (14 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_RETURN: /* 0x0f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (15 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_RETURN_WIDE: /* 0x10 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (16 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_RETURN_OBJECT: /* 0x11 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (17 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_4: /* 0x12 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (18 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_16: /* 0x13 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (19 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST: /* 0x14 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (20 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_HIGH16: /* 0x15 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (21 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_WIDE_16: /* 0x16 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (22 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_WIDE_32: /* 0x17 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (23 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_WIDE: /* 0x18 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (24 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_WIDE_HIGH16: /* 0x19 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (25 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_STRING: /* 0x1a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (26 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_STRING_JUMBO: /* 0x1b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (27 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CONST_CLASS: /* 0x1c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (28 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MONITOR_ENTER: /* 0x1d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (29 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MONITOR_EXIT: /* 0x1e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (30 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CHECK_CAST: /* 0x1f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (31 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INSTANCE_OF: /* 0x20 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (32 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ARRAY_LENGTH: /* 0x21 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (33 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NEW_INSTANCE: /* 0x22 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (34 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NEW_ARRAY: /* 0x23 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (35 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_FILLED_NEW_ARRAY: /* 0x24 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (36 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (37 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_FILL_ARRAY_DATA: /* 0x26 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (38 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_THROW: /* 0x27 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (39 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_GOTO: /* 0x28 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (40 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_GOTO_16: /* 0x29 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (41 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_GOTO_32: /* 0x2a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (42 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_PACKED_SWITCH: /* 0x2b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (43 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPARSE_SWITCH: /* 0x2c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (44 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CMPL_FLOAT: /* 0x2d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (45 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CMPG_FLOAT: /* 0x2e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (46 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CMPL_DOUBLE: /* 0x2f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (47 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CMPG_DOUBLE: /* 0x30 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (48 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_CMP_LONG: /* 0x31 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (49 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_EQ: /* 0x32 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (50 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_NE: /* 0x33 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (51 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_LT: /* 0x34 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (52 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_GE: /* 0x35 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (53 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_GT: /* 0x36 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (54 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_LE: /* 0x37 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (55 * 128) # rBIX = real OP_IF_LE handler (handlers are 128-byte aligned; 55 == 0x37)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_EQZ: /* 0x38 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (56 * 128) # rBIX = real OP_IF_EQZ handler (handlers are 128-byte aligned; 56 == 0x38)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_NEZ: /* 0x39 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (57 * 128) # rBIX = real OP_IF_NEZ handler (handlers are 128-byte aligned; 57 == 0x39)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_LTZ: /* 0x3a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (58 * 128) # rBIX = real OP_IF_LTZ handler (handlers are 128-byte aligned; 58 == 0x3a)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_GEZ: /* 0x3b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (59 * 128) # rBIX = real OP_IF_GEZ handler (handlers are 128-byte aligned; 59 == 0x3b)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_GTZ: /* 0x3c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (60 * 128) # rBIX = real OP_IF_GTZ handler (handlers are 128-byte aligned; 60 == 0x3c)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IF_LEZ: /* 0x3d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (61 * 128) # rBIX = real OP_IF_LEZ handler (handlers are 128-byte aligned; 61 == 0x3d)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3E: /* 0x3e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (62 * 128) # rBIX = real OP_UNUSED_3E handler (handlers are 128-byte aligned; 62 == 0x3e)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3F: /* 0x3f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (63 * 128) # rBIX = real OP_UNUSED_3F handler (handlers are 128-byte aligned; 63 == 0x3f)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_40: /* 0x40 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (64 * 128) # rBIX = real OP_UNUSED_40 handler (handlers are 128-byte aligned; 64 == 0x40)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_41: /* 0x41 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (65 * 128) # rBIX = real OP_UNUSED_41 handler (handlers are 128-byte aligned; 65 == 0x41)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_42: /* 0x42 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (66 * 128) # rBIX = real OP_UNUSED_42 handler (handlers are 128-byte aligned; 66 == 0x42)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_43: /* 0x43 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (67 * 128) # rBIX = real OP_UNUSED_43 handler (handlers are 128-byte aligned; 67 == 0x43)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AGET: /* 0x44 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (68 * 128) # rBIX = real OP_AGET handler (handlers are 128-byte aligned; 68 == 0x44)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AGET_WIDE: /* 0x45 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (69 * 128) # rBIX = real OP_AGET_WIDE handler (handlers are 128-byte aligned; 69 == 0x45)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AGET_OBJECT: /* 0x46 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (70 * 128) # rBIX = real OP_AGET_OBJECT handler (handlers are 128-byte aligned; 70 == 0x46)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AGET_BOOLEAN: /* 0x47 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (71 * 128) # rBIX = real OP_AGET_BOOLEAN handler (handlers are 128-byte aligned; 71 == 0x47)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AGET_BYTE: /* 0x48 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (72 * 128) # rBIX = real OP_AGET_BYTE handler (handlers are 128-byte aligned; 72 == 0x48)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AGET_CHAR: /* 0x49 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (73 * 128) # rBIX = real OP_AGET_CHAR handler (handlers are 128-byte aligned; 73 == 0x49)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AGET_SHORT: /* 0x4a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (74 * 128) # rBIX = real OP_AGET_SHORT handler (handlers are 128-byte aligned; 74 == 0x4a)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_APUT: /* 0x4b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (75 * 128) # rBIX = real OP_APUT handler (handlers are 128-byte aligned; 75 == 0x4b)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_APUT_WIDE: /* 0x4c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (76 * 128) # rBIX = real OP_APUT_WIDE handler (handlers are 128-byte aligned; 76 == 0x4c)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_APUT_OBJECT: /* 0x4d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (77 * 128) # rBIX = real OP_APUT_OBJECT handler (handlers are 128-byte aligned; 77 == 0x4d)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_APUT_BOOLEAN: /* 0x4e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (78 * 128) # rBIX = real OP_APUT_BOOLEAN handler (handlers are 128-byte aligned; 78 == 0x4e)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_APUT_BYTE: /* 0x4f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (79 * 128) # rBIX = real OP_APUT_BYTE handler (handlers are 128-byte aligned; 79 == 0x4f)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_APUT_CHAR: /* 0x50 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (80 * 128) # rBIX = real OP_APUT_CHAR handler (handlers are 128-byte aligned; 80 == 0x50)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_APUT_SHORT: /* 0x51 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (81 * 128) # rBIX = real OP_APUT_SHORT handler (handlers are 128-byte aligned; 81 == 0x51)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET: /* 0x52 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (82 * 128) # rBIX = real OP_IGET handler (handlers are 128-byte aligned; 82 == 0x52)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_WIDE: /* 0x53 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (83 * 128) # rBIX = real OP_IGET_WIDE handler (handlers are 128-byte aligned; 83 == 0x53)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_OBJECT: /* 0x54 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (84 * 128) # rBIX = real OP_IGET_OBJECT handler (handlers are 128-byte aligned; 84 == 0x54)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_BOOLEAN: /* 0x55 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (85 * 128) # rBIX = real OP_IGET_BOOLEAN handler (handlers are 128-byte aligned; 85 == 0x55)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_BYTE: /* 0x56 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (86 * 128) # rBIX = real OP_IGET_BYTE handler (handlers are 128-byte aligned; 86 == 0x56)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_CHAR: /* 0x57 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (87 * 128) # rBIX = real OP_IGET_CHAR handler (handlers are 128-byte aligned; 87 == 0x57)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_SHORT: /* 0x58 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (88 * 128) # rBIX = real OP_IGET_SHORT handler (handlers are 128-byte aligned; 88 == 0x58)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT: /* 0x59 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (89 * 128) # rBIX = real OP_IPUT handler (handlers are 128-byte aligned; 89 == 0x59)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_WIDE: /* 0x5a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (90 * 128) # rBIX = real OP_IPUT_WIDE handler (handlers are 128-byte aligned; 90 == 0x5a)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_OBJECT: /* 0x5b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (91 * 128) # rBIX = real OP_IPUT_OBJECT handler (handlers are 128-byte aligned; 91 == 0x5b)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_BOOLEAN: /* 0x5c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (92 * 128) # rBIX = real OP_IPUT_BOOLEAN handler (handlers are 128-byte aligned; 92 == 0x5c)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_BYTE: /* 0x5d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 = self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (93 * 128) # rBIX = real OP_IPUT_BYTE handler (handlers are 128-byte aligned; 93 == 0x5d)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # unconditionally refresh rIBASE
+ bnez a3, 1f # breakFlags set -> run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # dvmCheckBefore(pc, fp, self), then fall through to handler
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_CHAR: /* 0x5e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (94 * 128) # rBIX <- real OP_IPUT_CHAR handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_SHORT: /* 0x5f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (95 * 128) # rBIX <- real OP_IPUT_SHORT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET: /* 0x60 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (96 * 128) # rBIX <- real OP_SGET handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_WIDE: /* 0x61 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (97 * 128) # rBIX <- real OP_SGET_WIDE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_OBJECT: /* 0x62 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (98 * 128) # rBIX <- real OP_SGET_OBJECT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_BOOLEAN: /* 0x63 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (99 * 128) # rBIX <- real OP_SGET_BOOLEAN handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_BYTE: /* 0x64 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (100 * 128) # rBIX <- real OP_SGET_BYTE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_CHAR: /* 0x65 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (101 * 128) # rBIX <- real OP_SGET_CHAR handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_SHORT: /* 0x66 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (102 * 128) # rBIX <- real OP_SGET_SHORT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT: /* 0x67 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (103 * 128) # rBIX <- real OP_SPUT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_WIDE: /* 0x68 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (104 * 128) # rBIX <- real OP_SPUT_WIDE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_OBJECT: /* 0x69 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (105 * 128) # rBIX <- real OP_SPUT_OBJECT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_BOOLEAN: /* 0x6a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (106 * 128) # rBIX <- real OP_SPUT_BOOLEAN handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_BYTE: /* 0x6b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (107 * 128) # rBIX <- real OP_SPUT_BYTE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_CHAR: /* 0x6c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (108 * 128) # rBIX <- real OP_SPUT_CHAR handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_SHORT: /* 0x6d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (109 * 128) # rBIX <- real OP_SPUT_SHORT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL: /* 0x6e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (110 * 128) # rBIX <- real OP_INVOKE_VIRTUAL handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_SUPER: /* 0x6f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (111 * 128) # rBIX <- real OP_INVOKE_SUPER handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_DIRECT: /* 0x70 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (112 * 128) # rBIX <- real OP_INVOKE_DIRECT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_STATIC: /* 0x71 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (113 * 128) # rBIX <- real OP_INVOKE_STATIC handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_INTERFACE: /* 0x72 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (114 * 128) # rBIX <- real OP_INVOKE_INTERFACE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_73: /* 0x73 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (115 * 128) # rBIX <- real handler for unused opcode 0x73 (128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (116 * 128) # rBIX <- real OP_INVOKE_VIRTUAL_RANGE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_SUPER_RANGE: /* 0x75 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (117 * 128) # rBIX <- real OP_INVOKE_SUPER_RANGE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_DIRECT_RANGE: /* 0x76 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (118 * 128) # rBIX <- real OP_INVOKE_DIRECT_RANGE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_STATIC_RANGE: /* 0x77 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (119 * 128) # rBIX <- real OP_INVOKE_STATIC_RANGE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (120 * 128) # rBIX <- real OP_INVOKE_INTERFACE_RANGE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_79: /* 0x79 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (121 * 128) # rBIX <- real handler for unused opcode 0x79 (128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_7A: /* 0x7a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (122 * 128) # rBIX <- real handler for unused opcode 0x7a (128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NEG_INT: /* 0x7b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (123 * 128) # rBIX <- real OP_NEG_INT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NOT_INT: /* 0x7c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (124 * 128) # rBIX <- real OP_NOT_INT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NEG_LONG: /* 0x7d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (125 * 128) # rBIX <- real OP_NEG_LONG handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NOT_LONG: /* 0x7e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (126 * 128) # rBIX <- real OP_NOT_LONG handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NEG_FLOAT: /* 0x7f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (127 * 128) # rBIX <- real OP_NEG_FLOAT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_NEG_DOUBLE: /* 0x80 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (128 * 128) # rBIX <- real OP_NEG_DOUBLE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INT_TO_LONG: /* 0x81 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (129 * 128) # rBIX <- real OP_INT_TO_LONG handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INT_TO_FLOAT: /* 0x82 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (130 * 128) # rBIX <- real OP_INT_TO_FLOAT handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INT_TO_DOUBLE: /* 0x83 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (131 * 128) # rBIX <- real OP_INT_TO_DOUBLE handler (128-byte handler slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE from self->curHandlerTable
+ bnez a3, 1f # break pending? notify dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC() # export current dalvik PC so dvmCheckBefore can see it
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore) # process the pending request
+ jr rBIX # then continue at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_LONG_TO_INT: /* 0x84 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (132 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_LONG_TO_FLOAT: /* 0x85 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (133 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_LONG_TO_DOUBLE: /* 0x86 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (134 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_FLOAT_TO_INT: /* 0x87 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (135 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_FLOAT_TO_LONG: /* 0x88 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (136 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_FLOAT_TO_DOUBLE: /* 0x89 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (137 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DOUBLE_TO_INT: /* 0x8a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (138 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DOUBLE_TO_LONG: /* 0x8b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (139 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DOUBLE_TO_FLOAT: /* 0x8c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (140 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INT_TO_BYTE: /* 0x8d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (141 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INT_TO_CHAR: /* 0x8e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (142 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INT_TO_SHORT: /* 0x8f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (143 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_INT: /* 0x90 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (144 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_INT: /* 0x91 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (145 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_INT: /* 0x92 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (146 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DIV_INT: /* 0x93 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (147 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_REM_INT: /* 0x94 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (148 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AND_INT: /* 0x95 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (149 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_OR_INT: /* 0x96 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (150 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_XOR_INT: /* 0x97 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (151 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHL_INT: /* 0x98 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (152 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHR_INT: /* 0x99 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (153 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_USHR_INT: /* 0x9a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (154 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_LONG: /* 0x9b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (155 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_LONG: /* 0x9c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (156 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_LONG: /* 0x9d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (157 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DIV_LONG: /* 0x9e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (158 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_REM_LONG: /* 0x9f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (159 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AND_LONG: /* 0xa0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (160 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_OR_LONG: /* 0xa1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (161 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_XOR_LONG: /* 0xa2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (162 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHL_LONG: /* 0xa3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (163 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHR_LONG: /* 0xa4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (164 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_USHR_LONG: /* 0xa5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (165 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_FLOAT: /* 0xa6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (166 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_FLOAT: /* 0xa7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (167 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_FLOAT: /* 0xa8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (168 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DIV_FLOAT: /* 0xa9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (169 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_REM_FLOAT: /* 0xaa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (170 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_DOUBLE: /* 0xab */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (171 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_DOUBLE: /* 0xac */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (172 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_DOUBLE: /* 0xad */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (173 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DIV_DOUBLE: /* 0xae */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (174 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_REM_DOUBLE: /* 0xaf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (175 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_INT_2ADDR: /* 0xb0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (176 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_INT_2ADDR: /* 0xb1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (177 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_INT_2ADDR: /* 0xb2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (178 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DIV_INT_2ADDR: /* 0xb3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (179 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_REM_INT_2ADDR: /* 0xb4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (180 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AND_INT_2ADDR: /* 0xb5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (181 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_OR_INT_2ADDR: /* 0xb6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (182 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_XOR_INT_2ADDR: /* 0xb7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (183 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHL_INT_2ADDR: /* 0xb8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (184 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHR_INT_2ADDR: /* 0xb9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (185 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_USHR_INT_2ADDR: /* 0xba */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (186 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_LONG_2ADDR: /* 0xbb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (187 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_LONG_2ADDR: /* 0xbc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (188 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_LONG_2ADDR: /* 0xbd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (189 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DIV_LONG_2ADDR: /* 0xbe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (190 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_REM_LONG_2ADDR: /* 0xbf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (191 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_AND_LONG_2ADDR: /* 0xc0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (192 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_OR_LONG_2ADDR: /* 0xc1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (193 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_XOR_LONG_2ADDR: /* 0xc2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (194 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHL_LONG_2ADDR: /* 0xc3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (195 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SHR_LONG_2ADDR: /* 0xc4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (196 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_USHR_LONG_2ADDR: /* 0xc5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (197 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (198 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (199 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (200 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (201 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_REM_FLOAT_2ADDR: /* 0xca */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (202 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (203 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (204 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (205 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_DOUBLE_2ADDR: /* 0xce */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (206 * 128)  # rBIX <- real handler for opcode 0xce (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_DOUBLE_2ADDR: /* 0xcf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (207 * 128)  # rBIX <- real handler for opcode 0xcf (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_INT_LIT16: /* 0xd0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (208 * 128)  # rBIX <- real handler for opcode 0xd0 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RSUB_INT: /* 0xd1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (209 * 128)  # rBIX <- real handler for opcode 0xd1 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_INT_LIT16: /* 0xd2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (210 * 128)  # rBIX <- real handler for opcode 0xd2 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_INT_LIT16: /* 0xd3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (211 * 128)  # rBIX <- real handler for opcode 0xd3 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_INT_LIT16: /* 0xd4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (212 * 128)  # rBIX <- real handler for opcode 0xd4 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_INT_LIT16: /* 0xd5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (213 * 128)  # rBIX <- real handler for opcode 0xd5 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_INT_LIT16: /* 0xd6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (214 * 128)  # rBIX <- real handler for opcode 0xd6 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_INT_LIT16: /* 0xd7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (215 * 128)  # rBIX <- real handler for opcode 0xd7 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_ADD_INT_LIT8: /* 0xd8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (216 * 128)  # rBIX <- real handler for opcode 0xd8 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RSUB_INT_LIT8: /* 0xd9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (217 * 128)  # rBIX <- real handler for opcode 0xd9 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_MUL_INT_LIT8: /* 0xda */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (218 * 128)  # rBIX <- real handler for opcode 0xda (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DIV_INT_LIT8: /* 0xdb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (219 * 128)  # rBIX <- real handler for opcode 0xdb (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_REM_INT_LIT8: /* 0xdc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (220 * 128)  # rBIX <- real handler for opcode 0xdc (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_AND_INT_LIT8: /* 0xdd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (221 * 128)  # rBIX <- real handler for opcode 0xdd (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_OR_INT_LIT8: /* 0xde */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (222 * 128)  # rBIX <- real handler for opcode 0xde (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_XOR_INT_LIT8: /* 0xdf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (223 * 128)  # rBIX <- real handler for opcode 0xdf (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHL_INT_LIT8: /* 0xe0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (224 * 128)  # rBIX <- real handler for opcode 0xe0 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SHR_INT_LIT8: /* 0xe1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (225 * 128)  # rBIX <- real handler for opcode 0xe1 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_USHR_INT_LIT8: /* 0xe2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (226 * 128)  # rBIX <- real handler for opcode 0xe2 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_VOLATILE: /* 0xe3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (227 * 128)  # rBIX <- real handler for opcode 0xe3 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_VOLATILE: /* 0xe4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (228 * 128)  # rBIX <- real handler for opcode 0xe4 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_VOLATILE: /* 0xe5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (229 * 128)  # rBIX <- real handler for opcode 0xe5 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_VOLATILE: /* 0xe6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (230 * 128)  # rBIX <- real handler for opcode 0xe6 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (231 * 128)  # rBIX <- real handler for opcode 0xe7 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (232 * 128)  # rBIX <- real handler for opcode 0xe8 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (233 * 128)  # rBIX <- real handler for opcode 0xe9 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_WIDE_VOLATILE: /* 0xea */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (234 * 128)  # rBIX <- real handler for opcode 0xea (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (235 * 128)  # rBIX <- real handler for opcode 0xeb (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_BREAKPOINT: /* 0xec */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (236 * 128)  # rBIX <- real handler for opcode 0xec (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_THROW_VERIFICATION_ERROR: /* 0xed */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (237 * 128)  # rBIX <- real handler for opcode 0xed (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_EXECUTE_INLINE: /* 0xee */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (238 * 128)  # rBIX <- real handler for opcode 0xee (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_EXECUTE_INLINE_RANGE: /* 0xef */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (239 * 128)  # rBIX <- real handler for opcode 0xef (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_OBJECT_INIT_RANGE: /* 0xf0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (240 * 128)  # rBIX <- real handler for opcode 0xf0 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_RETURN_VOID_BARRIER: /* 0xf1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (241 * 128)  # rBIX <- real handler for opcode 0xf1 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_QUICK: /* 0xf2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (242 * 128)  # rBIX <- real handler for opcode 0xf2 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE_QUICK: /* 0xf3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (243 * 128)  # rBIX <- real handler for opcode 0xf3 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT_QUICK: /* 0xf4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu   a3, offThread_breakFlags(rSELF)             # a3 <- thread's breakFlags byte
+    la    rBIX, dvmAsmInstructionStart + (244 * 128)  # rBIX <- real handler for opcode 0xf4 (handlers are 128 bytes apart)
+    lw    rIBASE, offThread_curHandlerTable(rSELF)    # refresh rIBASE from the current handler table
+    bnez  a3, 1f                                      # break requested? call dvmCheckBefore first
+    jr    rBIX                                        # nothing to do - jump to real handler
+1:
+    EXPORT_PC()                                       # NOTE(review): presumably publishes rPC before calling out - macro defined elsewhere
+    move  a0, rPC                                     # arg0
+    move  a1, rFP                                     # arg1
+    move  a2, rSELF                                   # arg2
+    JAL(dvmCheckBefore)                               # dvmCheckBefore(rPC, rFP, rSELF)
+    jr    rBIX                                        # then jump to the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_QUICK: /* 0xf5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (245 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_WIDE_QUICK: /* 0xf6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (246 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_OBJECT_QUICK: /* 0xf7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (247 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (248 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (249 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_SUPER_QUICK: /* 0xfa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (250 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (251 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (252 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_OBJECT_VOLATILE: /* 0xfd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (253 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (254 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_DISPATCH_FF: /* 0xff */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (255 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CONST_CLASS_JUMBO: /* 0x100 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (256 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_CHECK_CAST_JUMBO: /* 0x101 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (257 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_INSTANCE_OF_JUMBO: /* 0x102 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (258 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEW_INSTANCE_JUMBO: /* 0x103 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (259 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_NEW_ARRAY_JUMBO: /* 0x104 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (260 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_FILLED_NEW_ARRAY_JUMBO: /* 0x105 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (261 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_JUMBO: /* 0x106 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (262 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_WIDE_JUMBO: /* 0x107 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (263 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_OBJECT_JUMBO: /* 0x108 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (264 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_BOOLEAN_JUMBO: /* 0x109 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (265 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_BYTE_JUMBO: /* 0x10a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (266 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_CHAR_JUMBO: /* 0x10b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (267 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IGET_SHORT_JUMBO: /* 0x10c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (268 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_JUMBO: /* 0x10d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (269 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_WIDE_JUMBO: /* 0x10e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (270 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_OBJECT_JUMBO: /* 0x10f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (271 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_BOOLEAN_JUMBO: /* 0x110 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (272 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_BYTE_JUMBO: /* 0x111 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (273 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_CHAR_JUMBO: /* 0x112 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (274 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_IPUT_SHORT_JUMBO: /* 0x113 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (275 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_JUMBO: /* 0x114 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (276 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_WIDE_JUMBO: /* 0x115 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (277 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_OBJECT_JUMBO: /* 0x116 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (278 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_BOOLEAN_JUMBO: /* 0x117 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (279 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_BYTE_JUMBO: /* 0x118 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (280 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_CHAR_JUMBO: /* 0x119 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (281 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SGET_SHORT_JUMBO: /* 0x11a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (282 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+    .balign 128
+.L_ALT_OP_SPUT_JUMBO: /* 0x11b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+    lbu    a3, offThread_breakFlags(rSELF)
+    la     rBIX, dvmAsmInstructionStart + (283 * 128)   # real handler entry; handler slots are 128 bytes apart
+    lw     rIBASE, offThread_curHandlerTable(rSELF)
+    bnez   a3, 1f
+    jr     rBIX            # nothing to do - jump to real handler
+1:
+    EXPORT_PC()
+    move   a0, rPC              # arg0
+    move   a1, rFP              # arg1
+    move   a2, rSELF            # arg2
+    JAL(dvmCheckBefore)
+    jr     rBIX            # resume at the real handler after the check
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_WIDE_JUMBO: /* 0x11c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (284 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_OBJECT_JUMBO: /* 0x11d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (285 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_BOOLEAN_JUMBO: /* 0x11e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (286 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_BYTE_JUMBO: /* 0x11f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (287 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_CHAR_JUMBO: /* 0x120 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (288 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_SHORT_JUMBO: /* 0x121 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (289 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_VIRTUAL_JUMBO: /* 0x122 */
+/* File: mips/alt_stub.S */
+ # (review) slot 290 == opcode 0x122; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (290 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_SUPER_JUMBO: /* 0x123 */
+/* File: mips/alt_stub.S */
+ # (review) slot 291 == opcode 0x123; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (291 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_DIRECT_JUMBO: /* 0x124 */
+/* File: mips/alt_stub.S */
+ # (review) slot 292 == opcode 0x124; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (292 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_STATIC_JUMBO: /* 0x125 */
+/* File: mips/alt_stub.S */
+ # (review) slot 293 == opcode 0x125; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (293 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_INTERFACE_JUMBO: /* 0x126 */
+/* File: mips/alt_stub.S */
+ # (review) slot 294 == opcode 0x126; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (294 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_27FF: /* 0x127 */
+/* File: mips/alt_stub.S */
+ # (review) slot 295 == opcode 0x127; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (295 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_28FF: /* 0x128 */
+/* File: mips/alt_stub.S */
+ # (review) slot 296 == opcode 0x128; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (296 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_29FF: /* 0x129 */
+/* File: mips/alt_stub.S */
+ # (review) slot 297 == opcode 0x129; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (297 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_2AFF: /* 0x12a */
+/* File: mips/alt_stub.S */
+ # (review) slot 298 == opcode 0x12a; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (298 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_2BFF: /* 0x12b */
+/* File: mips/alt_stub.S */
+ # (review) slot 299 == opcode 0x12b; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (299 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_2CFF: /* 0x12c */
+/* File: mips/alt_stub.S */
+ # (review) slot 300 == opcode 0x12c; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (300 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_2DFF: /* 0x12d */
+/* File: mips/alt_stub.S */
+ # (review) slot 301 == opcode 0x12d; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (301 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_2EFF: /* 0x12e */
+/* File: mips/alt_stub.S */
+ # (review) slot 302 == opcode 0x12e; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (302 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_2FFF: /* 0x12f */
+/* File: mips/alt_stub.S */
+ # (review) slot 303 == opcode 0x12f; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (303 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_30FF: /* 0x130 */
+/* File: mips/alt_stub.S */
+ # (review) slot 304 == opcode 0x130; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (304 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_31FF: /* 0x131 */
+/* File: mips/alt_stub.S */
+ # (review) slot 305 == opcode 0x131; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (305 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_32FF: /* 0x132 */
+/* File: mips/alt_stub.S */
+ # (review) slot 306 == opcode 0x132; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (306 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_33FF: /* 0x133 */
+/* File: mips/alt_stub.S */
+ # (review) slot 307 == opcode 0x133; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (307 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_34FF: /* 0x134 */
+/* File: mips/alt_stub.S */
+ # (review) slot 308 == opcode 0x134; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (308 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_35FF: /* 0x135 */
+/* File: mips/alt_stub.S */
+ # (review) slot 309 == opcode 0x135; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (309 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_36FF: /* 0x136 */
+/* File: mips/alt_stub.S */
+ # (review) slot 310 == opcode 0x136; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (310 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_37FF: /* 0x137 */
+/* File: mips/alt_stub.S */
+ # (review) slot 311 == opcode 0x137; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (311 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_38FF: /* 0x138 */
+/* File: mips/alt_stub.S */
+ # (review) slot 312 == opcode 0x138; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (312 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_39FF: /* 0x139 */
+/* File: mips/alt_stub.S */
+ # (review) slot 313 == opcode 0x139; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (313 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3AFF: /* 0x13a */
+/* File: mips/alt_stub.S */
+ # (review) slot 314 == opcode 0x13a; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (314 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3BFF: /* 0x13b */
+/* File: mips/alt_stub.S */
+ # (review) slot 315 == opcode 0x13b; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (315 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3CFF: /* 0x13c */
+/* File: mips/alt_stub.S */
+ # (review) slot 316 == opcode 0x13c; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (316 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3DFF: /* 0x13d */
+/* File: mips/alt_stub.S */
+ # (review) slot 317 == opcode 0x13d; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (317 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3EFF: /* 0x13e */
+/* File: mips/alt_stub.S */
+ # (review) slot 318 == opcode 0x13e; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (318 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_3FFF: /* 0x13f */
+/* File: mips/alt_stub.S */
+ # (review) slot 319 == opcode 0x13f; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (319 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_40FF: /* 0x140 */
+/* File: mips/alt_stub.S */
+ # (review) slot 320 == opcode 0x140; handler entries are 128 bytes apart
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (320 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_41FF: /* 0x141 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (321 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_42FF: /* 0x142 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (322 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_43FF: /* 0x143 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (323 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_44FF: /* 0x144 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (324 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_45FF: /* 0x145 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (325 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_46FF: /* 0x146 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (326 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_47FF: /* 0x147 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (327 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_48FF: /* 0x148 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (328 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_49FF: /* 0x149 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (329 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_4AFF: /* 0x14a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (330 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_4BFF: /* 0x14b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (331 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_4CFF: /* 0x14c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (332 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_4DFF: /* 0x14d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (333 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_4EFF: /* 0x14e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (334 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_4FFF: /* 0x14f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (335 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_50FF: /* 0x150 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (336 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_51FF: /* 0x151 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (337 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_52FF: /* 0x152 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (338 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_53FF: /* 0x153 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (339 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_54FF: /* 0x154 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (340 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_55FF: /* 0x155 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (341 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_56FF: /* 0x156 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (342 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_57FF: /* 0x157 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (343 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_58FF: /* 0x158 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (344 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_59FF: /* 0x159 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (345 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_5AFF: /* 0x15a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (346 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_5BFF: /* 0x15b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (347 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_5CFF: /* 0x15c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (348 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_5DFF: /* 0x15d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (349 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_5EFF: /* 0x15e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (350 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_5FFF: /* 0x15f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (351 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_60FF: /* 0x160 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (352 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_61FF: /* 0x161 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (353 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_62FF: /* 0x162 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (354 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_63FF: /* 0x163 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (355 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_64FF: /* 0x164 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (356 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_65FF: /* 0x165 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (357 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_66FF: /* 0x166 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (358 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_67FF: /* 0x167 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (359 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_68FF: /* 0x168 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (360 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_69FF: /* 0x169 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (361 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_6AFF: /* 0x16a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (362 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_6BFF: /* 0x16b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (363 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_6CFF: /* 0x16c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (364 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_6DFF: /* 0x16d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (365 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_6EFF: /* 0x16e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (366 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_6FFF: /* 0x16f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (367 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_70FF: /* 0x170 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (368 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_71FF: /* 0x171 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (369 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_72FF: /* 0x172 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (370 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_73FF: /* 0x173 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (371 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_74FF: /* 0x174 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (372 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_75FF: /* 0x175 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (373 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_76FF: /* 0x176 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (374 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_77FF: /* 0x177 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (375 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_78FF: /* 0x178 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (376 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_79FF: /* 0x179 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (377 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_7AFF: /* 0x17a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (378 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_7BFF: /* 0x17b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (379 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_7CFF: /* 0x17c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (380 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_7DFF: /* 0x17d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (381 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_7EFF: /* 0x17e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (382 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_7FFF: /* 0x17f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (383 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_80FF: /* 0x180 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (384 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_81FF: /* 0x181 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (385 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_82FF: /* 0x182 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (386 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_83FF: /* 0x183 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (387 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_84FF: /* 0x184 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (388 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_85FF: /* 0x185 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (389 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_86FF: /* 0x186 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (390 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_87FF: /* 0x187 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (391 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_88FF: /* 0x188 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (392 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_89FF: /* 0x189 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (392 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_8AFF: /* 0x18a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (394 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_8BFF: /* 0x18b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (395 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_8CFF: /* 0x18c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (396 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_8DFF: /* 0x18d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (397 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_8EFF: /* 0x18e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (398 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_8FFF: /* 0x18f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (399 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_90FF: /* 0x190 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (400 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_91FF: /* 0x191 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (401 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_92FF: /* 0x192 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (402 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_93FF: /* 0x193 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (403 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_94FF: /* 0x194 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (404 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_95FF: /* 0x195 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (405 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_96FF: /* 0x196 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (406 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_97FF: /* 0x197 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (407 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_98FF: /* 0x198 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (408 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_99FF: /* 0x199 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (409 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_9AFF: /* 0x19a */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (410 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_9BFF: /* 0x19b */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (411 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_9CFF: /* 0x19c */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (412 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_9DFF: /* 0x19d */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (413 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_9EFF: /* 0x19e */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (414 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_9FFF: /* 0x19f */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (415 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A0FF: /* 0x1a0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (416 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A1FF: /* 0x1a1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (417 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A2FF: /* 0x1a2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (418 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A3FF: /* 0x1a3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (419 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A4FF: /* 0x1a4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (420 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A5FF: /* 0x1a5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (421 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A6FF: /* 0x1a6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (422 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A7FF: /* 0x1a7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (423 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A8FF: /* 0x1a8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (424 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_A9FF: /* 0x1a9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (425 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_AAFF: /* 0x1aa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (426 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_ABFF: /* 0x1ab */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (427 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_ACFF: /* 0x1ac */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (428 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_ADFF: /* 0x1ad */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (429 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_AEFF: /* 0x1ae */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (430 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_AFFF: /* 0x1af */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (431 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B0FF: /* 0x1b0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (432 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B1FF: /* 0x1b1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF) # a3 <- self->breakFlags
+ la rBIX, dvmAsmInstructionStart + (433 * 128) # rBIX <- real handler (handlers are 128-byte slots)
+ lw rIBASE, offThread_curHandlerTable(rSELF) # always refresh rIBASE
+ bnez a3, 1f # breakFlags set? run dvmCheckBefore first
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0 <- Dalvik PC
+ move a1, rFP # arg1 <- Dalvik frame pointer
+ move a2, rSELF # arg2 <- self (Thread*)
+ JAL(dvmCheckBefore)
+ jr rBIX # resume at the real handler
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B2FF: /* 0x1b2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (434 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B3FF: /* 0x1b3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (435 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B4FF: /* 0x1b4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (436 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B5FF: /* 0x1b5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (437 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B6FF: /* 0x1b6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (438 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B7FF: /* 0x1b7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (439 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B8FF: /* 0x1b8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (440 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_B9FF: /* 0x1b9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (441 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_BAFF: /* 0x1ba */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (442 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_BBFF: /* 0x1bb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (443 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/*
+ * NOTE(review): Auto-generated ALT stubs for unused extended opcodes
+ * 0x1bc..0x1c9 (table entries 444..457), instantiated from
+ * mips/alt_stub.S.  Fixed 128-byte slots; edit the template, not these.
+ */
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_BCFF: /* 0x1bc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (444 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_BDFF: /* 0x1bd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (445 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_BEFF: /* 0x1be */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (446 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_BFFF: /* 0x1bf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (447 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C0FF: /* 0x1c0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (448 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C1FF: /* 0x1c1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (449 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C2FF: /* 0x1c2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (450 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C3FF: /* 0x1c3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (451 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C4FF: /* 0x1c4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (452 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C5FF: /* 0x1c5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (453 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C6FF: /* 0x1c6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (454 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C7FF: /* 0x1c7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (455 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C8FF: /* 0x1c8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (456 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_C9FF: /* 0x1c9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (457 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/*
+ * NOTE(review): Auto-generated ALT stubs for unused extended opcodes
+ * 0x1ca..0x1d7 (table entries 458..471), instantiated from
+ * mips/alt_stub.S.  Fixed 128-byte slots; edit the template, not these.
+ */
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_CAFF: /* 0x1ca */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (458 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_CBFF: /* 0x1cb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (459 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_CCFF: /* 0x1cc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (460 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_CDFF: /* 0x1cd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (461 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_CEFF: /* 0x1ce */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (462 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_CFFF: /* 0x1cf */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (463 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D0FF: /* 0x1d0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (464 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D1FF: /* 0x1d1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (465 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D2FF: /* 0x1d2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (466 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D3FF: /* 0x1d3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (467 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D4FF: /* 0x1d4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (468 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D5FF: /* 0x1d5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (469 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D6FF: /* 0x1d6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (470 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D7FF: /* 0x1d7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (471 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D8FF: /* 0x1d8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (472 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_D9FF: /* 0x1d9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (473 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_DAFF: /* 0x1da */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (474 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_DBFF: /* 0x1db */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (475 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_DCFF: /* 0x1dc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (476 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_DDFF: /* 0x1dd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (477 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_DEFF: /* 0x1de */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (478 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_DFFF: /* 0x1df */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (479 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E0FF: /* 0x1e0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (480 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E1FF: /* 0x1e1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (481 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E2FF: /* 0x1e2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (482 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E3FF: /* 0x1e3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (483 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E4FF: /* 0x1e4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (484 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E5FF: /* 0x1e5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (485 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E6FF: /* 0x1e6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (486 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E7FF: /* 0x1e7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (487 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E8FF: /* 0x1e8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (488 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_E9FF: /* 0x1e9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (489 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_EAFF: /* 0x1ea */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (490 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_EBFF: /* 0x1eb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (491 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_ECFF: /* 0x1ec */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (492 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_EDFF: /* 0x1ed */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (493 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_EEFF: /* 0x1ee */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (494 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_EFFF: /* 0x1ef */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (495 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_F0FF: /* 0x1f0 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (496 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_UNUSED_F1FF: /* 0x1f1 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (497 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_INVOKE_OBJECT_INIT_JUMBO: /* 0x1f2 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (498 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_VOLATILE_JUMBO: /* 0x1f3 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (499 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_WIDE_VOLATILE_JUMBO: /* 0x1f4 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (500 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IGET_OBJECT_VOLATILE_JUMBO: /* 0x1f5 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (501 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_VOLATILE_JUMBO: /* 0x1f6 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (502 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_WIDE_VOLATILE_JUMBO: /* 0x1f7 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (503 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_IPUT_OBJECT_VOLATILE_JUMBO: /* 0x1f8 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (504 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_VOLATILE_JUMBO: /* 0x1f9 */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (505 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_WIDE_VOLATILE_JUMBO: /* 0x1fa */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (506 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SGET_OBJECT_VOLATILE_JUMBO: /* 0x1fb */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (507 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_VOLATILE_JUMBO: /* 0x1fc */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (508 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_WIDE_VOLATILE_JUMBO: /* 0x1fd */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (509 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_SPUT_OBJECT_VOLATILE_JUMBO: /* 0x1fe */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (510 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+/* ------------------------------ */
+ .balign 128
+.L_ALT_OP_THROW_VERIFICATION_ERROR_JUMBO: /* 0x1ff */
+/* File: mips/alt_stub.S */
+/*
+ * Inter-instruction transfer stub. Call out to dvmCheckBefore to handle
+ * any interesting requests and then jump to the real instruction
+ * handler. Note that the call to dvmCheckBefore is done as a tail call.
+ * rIBASE updates won't be seen until a refresh, and we can tell we have a
+ * stale rIBASE if breakFlags==0. Always refresh rIBASE here, and then
+ * bail to the real handler if breakFlags==0.
+ */
+ lbu a3, offThread_breakFlags(rSELF)
+ la rBIX, dvmAsmInstructionStart + (511 * 128)
+ lw rIBASE, offThread_curHandlerTable(rSELF)
+ bnez a3, 1f
+ jr rBIX # nothing to do - jump to real handler
+1:
+ EXPORT_PC()
+ move a0, rPC # arg0
+ move a1, rFP # arg1
+ move a2, rSELF # arg2
+ JAL(dvmCheckBefore)
+ jr rBIX
+
+ .balign 128
+ .size dvmAsmAltInstructionStart, .-dvmAsmAltInstructionStart
+ .global dvmAsmAltInstructionEnd
+dvmAsmAltInstructionEnd:
diff --git a/vm/mterp/out/InterpC-allstubs.cpp b/vm/mterp/out/InterpC-allstubs.cpp
index 9410b891f..8e5196c28 100644
--- a/vm/mterp/out/InterpC-allstubs.cpp
+++ b/vm/mterp/out/InterpC-allstubs.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/out/InterpC-armv5te-vfp.cpp b/vm/mterp/out/InterpC-armv5te-vfp.cpp
index 2d66aad80..f406bc516 100644
--- a/vm/mterp/out/InterpC-armv5te-vfp.cpp
+++ b/vm/mterp/out/InterpC-armv5te-vfp.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/out/InterpC-armv5te.cpp b/vm/mterp/out/InterpC-armv5te.cpp
index 878d56889..106f53a51 100644
--- a/vm/mterp/out/InterpC-armv5te.cpp
+++ b/vm/mterp/out/InterpC-armv5te.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/out/InterpC-armv7-a-neon.cpp b/vm/mterp/out/InterpC-armv7-a-neon.cpp
index c55dfc0f1..1f86f6b2f 100644
--- a/vm/mterp/out/InterpC-armv7-a-neon.cpp
+++ b/vm/mterp/out/InterpC-armv7-a-neon.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/out/InterpC-armv7-a.cpp b/vm/mterp/out/InterpC-armv7-a.cpp
index 6089a606a..ec7372456 100644
--- a/vm/mterp/out/InterpC-armv7-a.cpp
+++ b/vm/mterp/out/InterpC-armv7-a.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/out/InterpC-mips.cpp b/vm/mterp/out/InterpC-mips.cpp
new file mode 100644
index 000000000..52819749f
--- /dev/null
+++ b/vm/mterp/out/InterpC-mips.cpp
@@ -0,0 +1,2423 @@
+/*
+ * This file was generated automatically by gen-mterp.py for 'mips'.
+ *
+ * --> DO NOT EDIT <--
+ */
+
+/* File: c/header.cpp */
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* common includes */
+#include "Dalvik.h"
+#include "interp/InterpDefs.h"
+#include "mterp/Mterp.h"
+#include <math.h> // needed for fmod, fmodf
+#include "mterp/common/FindInterface.h"
+
+/*
+ * Configuration defines. These affect the C implementations, i.e. the
+ * portable interpreter(s) and C stubs.
+ *
+ * Some defines are controlled by the Makefile, e.g.:
+ * WITH_INSTR_CHECKS
+ * WITH_TRACKREF_CHECKS
+ * EASY_GDB
+ * NDEBUG
+ */
+
+#ifdef WITH_INSTR_CHECKS /* instruction-level paranoia (slow!) */
+# define CHECK_BRANCH_OFFSETS
+# define CHECK_REGISTER_INDICES
+#endif
+
+/*
+ * Some architectures require 64-bit alignment for access to 64-bit data
+ * types. We can't just use pointers to copy 64-bit values out of our
+ * interpreted register set, because gcc may assume the pointer target is
+ * aligned and generate invalid code.
+ *
+ * There are two common approaches:
+ * (1) Use a union that defines a 32-bit pair and a 64-bit value.
+ * (2) Call memcpy().
+ *
+ * Depending upon what compiler you're using and what options are specified,
+ * one may be faster than the other. For example, the compiler might
+ * convert a memcpy() of 8 bytes into a series of instructions and omit
+ * the call. The union version could cause some strange side-effects,
+ * e.g. for a while ARM gcc thought it needed separate storage for each
+ * inlined instance, and generated instructions to zero out ~700 bytes of
+ * stack space at the top of the interpreter.
+ *
+ * The default is to use memcpy(). The current gcc for ARM seems to do
+ * better with the union.
+ */
+#if defined(__ARM_EABI__)
+# define NO_UNALIGN_64__UNION
+#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
+
+
+//#define LOG_INSTR /* verbose debugging */
+/* set and adjust ANDROID_LOG_TAGS='*:i jdwp:i dalvikvm:i dalvikvmi:i' */
+
+/*
+ * Export another copy of the PC on every instruction; this is largely
+ * redundant with EXPORT_PC and the debugger code. This value can be
+ * compared against what we have stored on the stack with EXPORT_PC to
+ * help ensure that we aren't missing any export calls.
+ */
+#if WITH_EXTRA_GC_CHECKS > 1
+# define EXPORT_EXTRA_PC() (self->currentPc2 = pc)
+#else
+# define EXPORT_EXTRA_PC()
+#endif
+
+/*
+ * Adjust the program counter. "_offset" is a signed int, in 16-bit units.
+ *
+ * Assumes the existence of "const u2* pc" and "const u2* curMethod->insns".
+ *
+ * We don't advance the program counter until we finish an instruction or
+ * branch, because we do want to have to unroll the PC if there's an
+ * exception.
+ */
+#ifdef CHECK_BRANCH_OFFSETS
+# define ADJUST_PC(_offset) do { \
+ int myoff = _offset; /* deref only once */ \
+ if (pc + myoff < curMethod->insns || \
+ pc + myoff >= curMethod->insns + dvmGetMethodInsnsSize(curMethod)) \
+ { \
+ char* desc; \
+ desc = dexProtoCopyMethodDescriptor(&curMethod->prototype); \
+ LOGE("Invalid branch %d at 0x%04x in %s.%s %s", \
+ myoff, (int) (pc - curMethod->insns), \
+ curMethod->clazz->descriptor, curMethod->name, desc); \
+ free(desc); \
+ dvmAbort(); \
+ } \
+ pc += myoff; \
+ EXPORT_EXTRA_PC(); \
+ } while (false)
+#else
+# define ADJUST_PC(_offset) do { \
+ pc += _offset; \
+ EXPORT_EXTRA_PC(); \
+ } while (false)
+#endif
+
+/*
+ * If enabled, log instructions as we execute them.
+ */
+#ifdef LOG_INSTR
+# define ILOGD(...) ILOG(LOG_DEBUG, __VA_ARGS__)
+# define ILOGV(...) ILOG(LOG_VERBOSE, __VA_ARGS__)
+# define ILOG(_level, ...) do { \
+ char debugStrBuf[128]; \
+ snprintf(debugStrBuf, sizeof(debugStrBuf), __VA_ARGS__); \
+ if (curMethod != NULL) \
+ LOG(_level, LOG_TAG"i", "%-2d|%04x%s", \
+ self->threadId, (int)(pc - curMethod->insns), debugStrBuf); \
+ else \
+ LOG(_level, LOG_TAG"i", "%-2d|####%s", \
+ self->threadId, debugStrBuf); \
+ } while(false)
+void dvmDumpRegs(const Method* method, const u4* framePtr, bool inOnly);
+# define DUMP_REGS(_meth, _frame, _inOnly) dvmDumpRegs(_meth, _frame, _inOnly)
+static const char kSpacing[] = " ";
+#else
+# define ILOGD(...) ((void)0)
+# define ILOGV(...) ((void)0)
+# define DUMP_REGS(_meth, _frame, _inOnly) ((void)0)
+#endif
+
+/* get a long from an array of u4 */
+static inline s8 getLongFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.ll;
+#else
+ s8 val;
+ memcpy(&val, &ptr[idx], 8);
+ return val;
+#endif
+}
+
+/* store a long into an array of u4 */
+static inline void putLongToArray(u4* ptr, int idx, s8 val)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { s8 ll; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.ll = val;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#else
+ memcpy(&ptr[idx], &val, 8);
+#endif
+}
+
+/* get a double from an array of u4 */
+static inline double getDoubleFromArray(const u4* ptr, int idx)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.parts[0] = ptr[0];
+ conv.parts[1] = ptr[1];
+ return conv.d;
+#else
+ double dval;
+ memcpy(&dval, &ptr[idx], 8);
+ return dval;
+#endif
+}
+
+/* store a double into an array of u4 */
+static inline void putDoubleToArray(u4* ptr, int idx, double dval)
+{
+#if defined(NO_UNALIGN_64__UNION)
+ union { double d; u4 parts[2]; } conv;
+
+ ptr += idx;
+ conv.d = dval;
+ ptr[0] = conv.parts[0];
+ ptr[1] = conv.parts[1];
+#else
+ memcpy(&ptr[idx], &dval, 8);
+#endif
+}
+
+/*
+ * If enabled, validate the register number on every access. Otherwise,
+ * just do an array access.
+ *
+ * Assumes the existence of "u4* fp".
+ *
+ * "_idx" may be referenced more than once.
+ */
+#ifdef CHECK_REGISTER_INDICES
+# define GET_REGISTER(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)]) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (fp[(_idx)] = (u4)(_val)) : (assert(!"bad reg"),1969) )
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object *)GET_REGISTER(_idx))
+# define SET_REGISTER_AS_OBJECT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_INT(_idx) ((s4) GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getLongFromArray(fp, (_idx)) : (assert(!"bad reg"),1969) )
+# define SET_REGISTER_WIDE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ (void)putLongToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+# define GET_REGISTER_FLOAT(_idx) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)])) : (assert(!"bad reg"),1969.0f) )
+# define SET_REGISTER_FLOAT(_idx, _val) \
+ ( (_idx) < curMethod->registersSize ? \
+ (*((float*) &fp[(_idx)]) = (_val)) : (assert(!"bad reg"),1969.0f) )
+# define GET_REGISTER_DOUBLE(_idx) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ getDoubleFromArray(fp, (_idx)) : (assert(!"bad reg"),1969.0) )
+# define SET_REGISTER_DOUBLE(_idx, _val) \
+ ( (_idx) < curMethod->registersSize-1 ? \
+ (void)putDoubleToArray(fp, (_idx), (_val)) : assert(!"bad reg") )
+#else
+# define GET_REGISTER(_idx) (fp[(_idx)])
+# define SET_REGISTER(_idx, _val) (fp[(_idx)] = (_val))
+# define GET_REGISTER_AS_OBJECT(_idx) ((Object*) fp[(_idx)])
+# define SET_REGISTER_AS_OBJECT(_idx, _val) (fp[(_idx)] = (u4)(_val))
+# define GET_REGISTER_INT(_idx) ((s4)GET_REGISTER(_idx))
+# define SET_REGISTER_INT(_idx, _val) SET_REGISTER(_idx, (s4)_val)
+# define GET_REGISTER_WIDE(_idx) getLongFromArray(fp, (_idx))
+# define SET_REGISTER_WIDE(_idx, _val) putLongToArray(fp, (_idx), (_val))
+# define GET_REGISTER_FLOAT(_idx) (*((float*) &fp[(_idx)]))
+# define SET_REGISTER_FLOAT(_idx, _val) (*((float*) &fp[(_idx)]) = (_val))
+# define GET_REGISTER_DOUBLE(_idx) getDoubleFromArray(fp, (_idx))
+# define SET_REGISTER_DOUBLE(_idx, _val) putDoubleToArray(fp, (_idx), (_val))
+#endif
+
+/*
+ * Get 16 bits from the specified offset of the program counter. We always
+ * want to load 16 bits at a time from the instruction stream -- it's more
+ * efficient than 8 and won't have the alignment problems that 32 might.
+ *
+ * Assumes existence of "const u2* pc".
+ */
+#define FETCH(_offset) (pc[(_offset)])
+
+/*
+ * Extract instruction byte from 16-bit fetch (_inst is a u2).
+ */
+#define INST_INST(_inst) ((_inst) & 0xff)
+
+/*
+ * Replace the opcode (used when handling breakpoints). _opcode is a u1.
+ */
+#define INST_REPLACE_OP(_inst, _opcode) (((_inst) & 0xff00) | _opcode)
+
+/*
+ * Extract the "vA, vB" 4-bit registers from the instruction word (_inst is u2).
+ */
+#define INST_A(_inst) (((_inst) >> 8) & 0x0f)
+#define INST_B(_inst) ((_inst) >> 12)
+
+/*
+ * Get the 8-bit "vAA" 8-bit register index from the instruction word.
+ * (_inst is u2)
+ */
+#define INST_AA(_inst) ((_inst) >> 8)
+
+/*
+ * The current PC must be available to Throwable constructors, e.g.
+ * those created by the various exception throw routines, so that the
+ * exception stack trace can be generated correctly. If we don't do this,
+ * the offset within the current method won't be shown correctly. See the
+ * notes in Exception.c.
+ *
+ * This is also used to determine the address for precise GC.
+ *
+ * Assumes existence of "u4* fp" and "const u2* pc".
+ */
+#define EXPORT_PC() (SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc)
+
+/*
+ * Check to see if "obj" is NULL. If so, throw an exception. Assumes the
+ * pc has already been exported to the stack.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler calls into
+ * something that could throw an exception (so we have already called
+ * EXPORT_PC at the top).
+ */
+static inline bool checkForNull(Object* obj)
+{
+ if (obj == NULL) {
+ dvmThrowNullPointerException(NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsHeapAddressObject(obj)) {
+ LOGE("Invalid object %p", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/*
+ * Check to see if "obj" is NULL. If so, export the PC into the stack
+ * frame and throw an exception.
+ *
+ * Perform additional checks on debug builds.
+ *
+ * Use this to check for NULL when the instruction handler doesn't do
+ * anything else that can throw an exception.
+ */
+static inline bool checkForNullExportPC(Object* obj, u4* fp, const u2* pc)
+{
+ if (obj == NULL) {
+ EXPORT_PC();
+ dvmThrowNullPointerException(NULL);
+ return false;
+ }
+#ifdef WITH_EXTRA_OBJECT_VALIDATION
+ if (!dvmIsHeapAddress(obj)) {
+ LOGE("Invalid object %p", obj);
+ dvmAbort();
+ }
+#endif
+#ifndef NDEBUG
+ if (obj->clazz == NULL || ((u4) obj->clazz) <= 65536) {
+ /* probable heap corruption */
+ LOGE("Invalid object class %p (in %p)", obj->clazz, obj);
+ dvmAbort();
+ }
+#endif
+ return true;
+}
+
+/* File: cstubs/stubdefs.cpp */
+/*
+ * In the C mterp stubs, "goto" is a function call followed immediately
+ * by a return.
+ */
+
+#define GOTO_TARGET_DECL(_target, ...) \
+ extern "C" void dvmMterp_##_target(Thread* self, ## __VA_ARGS__);
+
+/* (void)xxx to quiet unused variable compiler warnings. */
+#define GOTO_TARGET(_target, ...) \
+ void dvmMterp_##_target(Thread* self, ## __VA_ARGS__) { \
+ u2 ref, vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ const Method* methodToCall; \
+ StackSaveArea* debugSaveArea; \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst; \
+ (void)methodToCall; (void)debugSaveArea;
+
+#define GOTO_TARGET_END }
+
+/*
+ * Redefine what used to be local variable accesses into Thread struct
+ * references. (These are undefined down in "footer.cpp".)
+ */
+#define retval self->interpSave.retval
+#define pc self->interpSave.pc
+#define fp self->interpSave.curFrame
+#define curMethod self->interpSave.method
+#define methodClassDex self->interpSave.methodClassDex
+#define debugTrackedRefStart self->interpSave.debugTrackedRefStart
+
+/* ugh */
+#define STUB_HACK(x) x
+#if defined(WITH_JIT)
+#define JIT_STUB_HACK(x) x
+#else
+#define JIT_STUB_HACK(x)
+#endif
+
+/*
+ * InterpSave's pc and fp must be valid when breaking out to a
+ * "Reportxxx" routine. Because the portable interpreter uses local
+ * variables for these, we must flush prior. Stubs, however, use
+ * the interpSave vars directly, so this is a nop for stubs.
+ */
+#define PC_FP_TO_SELF()
+#define PC_TO_SELF()
+
+/*
+ * Opcode handler framing macros. Here, each opcode is a separate function
+ * that takes a "self" argument and returns void. We can't declare
+ * these "static" because they may be called from an assembly stub.
+ * (void)xxx to quiet unused variable compiler warnings.
+ */
+#define HANDLE_OPCODE(_op) \
+ extern "C" void dvmMterp_##_op(Thread* self); \
+ void dvmMterp_##_op(Thread* self) { \
+ u4 ref; \
+ u2 vsrc1, vsrc2, vdst; \
+ u2 inst = FETCH(0); \
+ (void)ref; (void)vsrc1; (void)vsrc2; (void)vdst; (void)inst;
+
+#define OP_END }
+
+/*
+ * Like the "portable" FINISH, but don't reload "inst", and return to caller
+ * when done. Further, debugger/profiler checks are handled
+ * before handler execution in mterp, so we don't do them here either.
+ */
+#if defined(WITH_JIT)
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ if (self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) { \
+ dvmCheckJit(pc, self); \
+ } \
+ return; \
+ }
+#else
+#define FINISH(_offset) { \
+ ADJUST_PC(_offset); \
+ return; \
+ }
+#endif
+
+#define FINISH_BKPT(_opcode) /* FIXME? */
+#define DISPATCH_EXTENDED(_opcode) /* FIXME? */
+
+/*
+ * The "goto label" statements turn into function calls followed by
+ * return statements. Some of the functions take arguments, which in the
+ * portable interpreter are handled by assigning values to globals.
+ */
+
+#define GOTO_exceptionThrown() \
+ do { \
+ dvmMterp_exceptionThrown(self); \
+ return; \
+ } while(false)
+
+#define GOTO_returnFromMethod() \
+ do { \
+ dvmMterp_returnFromMethod(self); \
+ return; \
+ } while(false)
+
+#define GOTO_invoke(_target, _methodCallRange, _jumboFormat) \
+ do { \
+ dvmMterp_##_target(self, _methodCallRange, _jumboFormat); \
+ return; \
+ } while(false)
+
+#define GOTO_invokeMethod(_methodCallRange, _methodToCall, _vsrc1, _vdst) \
+ do { \
+ dvmMterp_invokeMethod(self, _methodCallRange, _methodToCall, \
+ _vsrc1, _vdst); \
+ return; \
+ } while(false)
+
+/*
+ * As a special case, "goto bail" turns into a longjmp.
+ */
+#define GOTO_bail() \
+ dvmMterpStdBail(self, false)
+
+/*
+ * Periodically check for thread suspension.
+ *
+ * While we're at it, see if a debugger has attached or the profiler has
+ * started.
+ */
+#define PERIODIC_CHECKS(_pcadj) { \
+ if (dvmCheckSuspendQuick(self)) { \
+ EXPORT_PC(); /* need for precise GC */ \
+ dvmCheckSuspendPending(self); \
+ } \
+ }
+
+/* File: c/opcommon.cpp */
+/* forward declarations of goto targets */
+GOTO_TARGET_DECL(filledNewArray, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtual, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuper, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeInterface, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeDirect, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeStatic, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeVirtualQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeSuperQuick, bool methodCallRange, bool jumboFormat);
+GOTO_TARGET_DECL(invokeMethod, bool methodCallRange, const Method* methodToCall,
+ u2 count, u2 regs);
+GOTO_TARGET_DECL(returnFromMethod);
+GOTO_TARGET_DECL(exceptionThrown);
+
+/*
+ * ===========================================================================
+ *
+ * What follows are opcode definitions shared between multiple opcodes with
+ * minor substitutions handled by the C pre-processor. These should probably
+ * use the mterp substitution mechanism instead, with the code here moved
+ * into common fragment files (like the asm "binop.S"), although it's hard
+ * to give up the C preprocessor in favor of the much simpler text subst.
+ *
+ * ===========================================================================
+ */
+
+#define HANDLE_NUMCONV(_opcode, _opname, _fromtype, _totype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_totype(vdst, \
+ GET_REGISTER##_fromtype(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_FLOAT_TO_INT(_opcode, _opname, _fromvtype, _fromrtype, \
+ _tovtype, _tortype) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ { \
+ /* spec defines specific handling for +/- inf and NaN values */ \
+ _fromvtype val; \
+ _tovtype intMin, intMax, result; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ val = GET_REGISTER##_fromrtype(vsrc1); \
+ intMin = (_tovtype) 1 << (sizeof(_tovtype) * 8 -1); \
+ intMax = ~intMin; \
+ result = (_tovtype) val; \
+ if (val >= intMax) /* +inf */ \
+ result = intMax; \
+ else if (val <= intMin) /* -inf */ \
+ result = intMin; \
+ else if (val != val) /* NaN */ \
+ result = 0; \
+ else \
+ result = (_tovtype) val; \
+ SET_REGISTER##_tortype(vdst, result); \
+ } \
+ FINISH(1);
+
+#define HANDLE_INT_TO_SMALL(_opcode, _opname, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|int-to-%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, (_type) GET_REGISTER(vsrc1)); \
+ FINISH(1);
+
+/* NOTE: the comparison result is always a signed 4-byte integer */
+#define HANDLE_OP_CMPX(_opcode, _opname, _varType, _type, _nanVal) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ int result; \
+ u2 regs; \
+ _varType val1, val2; \
+ vdst = INST_AA(inst); \
+ regs = FETCH(1); \
+ vsrc1 = regs & 0xff; \
+ vsrc2 = regs >> 8; \
+ ILOGV("|cmp%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ val1 = GET_REGISTER##_type(vsrc1); \
+ val2 = GET_REGISTER##_type(vsrc2); \
+ if (val1 == val2) \
+ result = 0; \
+ else if (val1 < val2) \
+ result = -1; \
+ else if (val1 > val2) \
+ result = 1; \
+ else \
+ result = (_nanVal); \
+ ILOGV("+ result=%d", result); \
+ SET_REGISTER(vdst, result); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_IF_XX(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vA, vB, +CCCC*/) \
+ vsrc1 = INST_A(inst); \
+ vsrc2 = INST_B(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp (s4) GET_REGISTER(vsrc2)) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,v%d,+0x%04x", (_opname), vsrc1, vsrc2, \
+ branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,v%d,-", (_opname), vsrc1, vsrc2); \
+ FINISH(2); \
+ }
+
+#define HANDLE_OP_IF_XXZ(_opcode, _opname, _cmp) \
+ HANDLE_OPCODE(_opcode /*vAA, +BBBB*/) \
+ vsrc1 = INST_AA(inst); \
+ if ((s4) GET_REGISTER(vsrc1) _cmp 0) { \
+ int branchOffset = (s2)FETCH(1); /* sign-extended */ \
+ ILOGV("|if-%s v%d,+0x%04x", (_opname), vsrc1, branchOffset); \
+ ILOGV("> branch taken"); \
+ if (branchOffset < 0) \
+ PERIODIC_CHECKS(branchOffset); \
+ FINISH(branchOffset); \
+ } else { \
+ ILOGV("|if-%s v%d,-", (_opname), vsrc1); \
+ FINISH(2); \
+ }
+
+#define HANDLE_UNOP(_opcode, _opname, _pfx, _sfx, _type) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER##_type(vdst, _pfx GET_REGISTER##_type(vsrc1) _sfx); \
+ FINISH(1);
+
+#define HANDLE_OP_X_INT(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ secondVal = GET_REGISTER(vsrc2); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s4) GET_REGISTER(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-int v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (GET_REGISTER(vsrc2) & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT16(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB, #+CCCC*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ vsrc2 = FETCH(1); \
+ ILOGV("|%s-int/lit16 v%d,v%d,#+0x%04x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s2) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s2) vsrc2) == -1) { \
+ /* won't generate /lit16 instr for this; check anyway */ \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op (s2) vsrc2; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ /* non-div/rem case */ \
+ SET_REGISTER(vdst, GET_REGISTER(vsrc1) _op (s2) vsrc2); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_LIT8(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, result; \
+ firstVal = GET_REGISTER(vsrc1); \
+ if ((s1) vsrc2 == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && ((s1) vsrc2) == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op ((s1) vsrc2); \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vsrc1) _op (s1) vsrc2); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_INT_LIT8(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, #+CC*/) \
+ { \
+ u2 litInfo; \
+ vdst = INST_AA(inst); \
+ litInfo = FETCH(1); \
+ vsrc1 = litInfo & 0xff; \
+ vsrc2 = litInfo >> 8; /* constant */ \
+ ILOGV("|%s-int/lit8 v%d,v%d,#+0x%02x", \
+ (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vsrc1) _op (vsrc2 & 0x1f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_INT_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s4 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER(vdst); \
+ secondVal = GET_REGISTER(vsrc1); \
+ if (secondVal == 0) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u4)firstVal == 0x80000000 && secondVal == -1) { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER(vdst, result); \
+ } else { \
+ SET_REGISTER(vdst, \
+ (s4) GET_REGISTER(vdst) _op (s4) GET_REGISTER(vsrc1)); \
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_INT_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-int-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER(vdst, \
+ _cast GET_REGISTER(vdst) _op (GET_REGISTER(vsrc1) & 0x1f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_LONG(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vsrc1); \
+ secondVal = GET_REGISTER_WIDE(vsrc2); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vsrc1) _op (s8) GET_REGISTER_WIDE(vsrc2)); \
+ } \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_SHX_LONG(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-long v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vsrc1) _op (GET_REGISTER(vsrc2) & 0x3f)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_LONG_2ADDR(_opcode, _opname, _op, _chkdiv) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ if (_chkdiv != 0) { \
+ s8 firstVal, secondVal, result; \
+ firstVal = GET_REGISTER_WIDE(vdst); \
+ secondVal = GET_REGISTER_WIDE(vsrc1); \
+ if (secondVal == 0LL) { \
+ EXPORT_PC(); \
+ dvmThrowArithmeticException("divide by zero"); \
+ GOTO_exceptionThrown(); \
+ } \
+ if ((u8)firstVal == 0x8000000000000000ULL && \
+ secondVal == -1LL) \
+ { \
+ if (_chkdiv == 1) \
+ result = firstVal; /* division */ \
+ else \
+ result = 0; /* remainder */ \
+ } else { \
+ result = firstVal _op secondVal; \
+ } \
+ SET_REGISTER_WIDE(vdst, result); \
+ } else { \
+ SET_REGISTER_WIDE(vdst, \
+ (s8) GET_REGISTER_WIDE(vdst) _op (s8)GET_REGISTER_WIDE(vsrc1));\
+ } \
+ FINISH(1);
+
+#define HANDLE_OP_SHX_LONG_2ADDR(_opcode, _opname, _cast, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-long-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_WIDE(vdst, \
+ _cast GET_REGISTER_WIDE(vdst) _op (GET_REGISTER(vsrc1) & 0x3f)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_FLOAT(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-float v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vsrc1) _op GET_REGISTER_FLOAT(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_DOUBLE(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ u2 srcRegs; \
+ vdst = INST_AA(inst); \
+ srcRegs = FETCH(1); \
+ vsrc1 = srcRegs & 0xff; \
+ vsrc2 = srcRegs >> 8; \
+ ILOGV("|%s-double v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vsrc1) _op GET_REGISTER_DOUBLE(vsrc2)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_X_FLOAT_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-float-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_FLOAT(vdst, \
+ GET_REGISTER_FLOAT(vdst) _op GET_REGISTER_FLOAT(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_X_DOUBLE_2ADDR(_opcode, _opname, _op) \
+ HANDLE_OPCODE(_opcode /*vA, vB*/) \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); \
+ ILOGV("|%s-double-2addr v%d,v%d", (_opname), vdst, vsrc1); \
+ SET_REGISTER_DOUBLE(vdst, \
+ GET_REGISTER_DOUBLE(vdst) _op GET_REGISTER_DOUBLE(vsrc1)); \
+ FINISH(1);
+
+#define HANDLE_OP_AGET(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* index */ \
+ ILOGV("|aget%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowArrayIndexOutOfBoundsException( \
+ arrayObj->length, GET_REGISTER(vsrc2)); \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)]); \
+ ILOGV("+ AGET[%d]=%#x", GET_REGISTER(vsrc2), GET_REGISTER(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_OP_APUT(_opcode, _opname, _type, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, vBB, vCC*/) \
+ { \
+ ArrayObject* arrayObj; \
+ u2 arrayInfo; \
+ EXPORT_PC(); \
+ vdst = INST_AA(inst); /* AA: source value */ \
+ arrayInfo = FETCH(1); \
+ vsrc1 = arrayInfo & 0xff; /* BB: array ptr */ \
+ vsrc2 = arrayInfo >> 8; /* CC: index */ \
+ ILOGV("|aput%s v%d,v%d,v%d", (_opname), vdst, vsrc1, vsrc2); \
+ arrayObj = (ArrayObject*) GET_REGISTER(vsrc1); \
+ if (!checkForNull((Object*) arrayObj)) \
+ GOTO_exceptionThrown(); \
+ if (GET_REGISTER(vsrc2) >= arrayObj->length) { \
+ dvmThrowArrayIndexOutOfBoundsException( \
+ arrayObj->length, GET_REGISTER(vsrc2)); \
+ GOTO_exceptionThrown(); \
+ } \
+ ILOGV("+ APUT[%d]=0x%08x", GET_REGISTER(vsrc2), GET_REGISTER(vdst));\
+ ((_type*)(void*)arrayObj->contents)[GET_REGISTER(vsrc2)] = \
+ GET_REGISTER##_regsize(vdst); \
+ } \
+ FINISH(2);
+
+/*
+ * It's possible to get a bad value out of a field with sub-32-bit stores
+ * because the -quick versions always operate on 32 bits. Consider:
+ * short foo = -1 (sets a 32-bit register to 0xffffffff)
+ * iput-quick foo (writes all 32 bits to the field)
+ * short bar = 1 (sets a 32-bit register to 0x00000001)
+ * iput-short (writes the low 16 bits to the field)
+ * iget-quick foo (reads all 32 bits from the field, yielding 0xffff0001)
+ * This can only happen when optimized and non-optimized code has interleaved
+ * access to the same field. This is unlikely but possible.
+ *
+ * The easiest way to fix this is to always read/write 32 bits at a time. On
+ * a device with a 16-bit data bus this is sub-optimal. (The alternative
+ * approach is to have sub-int versions of iget-quick, but now we're wasting
+ * Dalvik instruction space and making it less likely that handler code will
+ * already be in the CPU i-cache.)
+ */
+#define HANDLE_IGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iget%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IGET_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
+ vdst = FETCH(3); \
+ vsrc1 = FETCH(4); /* object ptr */ \
+ ILOGV("|iget%s/jumbo v%d,v%d,field@0x%08x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ SET_REGISTER##_regsize(vdst, \
+ dvmGetField##_ftype(obj, ifield->byteOffset)); \
+ ILOGV("+ IGET '%s'=0x%08llx", ifield->name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(5);
+
+#define HANDLE_IGET_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iget%s-quick v%d,v%d,field@+%u", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ SET_REGISTER##_regsize(vdst, dvmGetField##_ftype(obj, ref)); \
+ ILOGV("+ IGETQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|iput%s v%d,v%d,field@0x%04x", (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_IPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vBBBB, vCCCC, class@AAAAAAAA*/) \
+ { \
+ InstField* ifield; \
+ Object* obj; \
+ EXPORT_PC(); \
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
+ vdst = FETCH(3); \
+ vsrc1 = FETCH(4); /* object ptr */ \
+ ILOGV("|iput%s/jumbo v%d,v%d,field@0x%08x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNull(obj)) \
+ GOTO_exceptionThrown(); \
+ ifield = (InstField*) dvmDexGetResolvedField(methodClassDex, ref); \
+ if (ifield == NULL) { \
+ ifield = dvmResolveInstField(curMethod->clazz, ref); \
+ if (ifield == NULL) \
+ GOTO_exceptionThrown(); \
+ } \
+ dvmSetField##_ftype(obj, ifield->byteOffset, \
+ GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUT '%s'=0x%08llx", ifield->name, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(5);
+
+#define HANDLE_IPUT_X_QUICK(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vA, vB, field@CCCC*/) \
+ { \
+ Object* obj; \
+ vdst = INST_A(inst); \
+ vsrc1 = INST_B(inst); /* object ptr */ \
+ ref = FETCH(1); /* field offset */ \
+ ILOGV("|iput%s-quick v%d,v%d,field@0x%04x", \
+ (_opname), vdst, vsrc1, ref); \
+ obj = (Object*) GET_REGISTER(vsrc1); \
+ if (!checkForNullExportPC(obj, fp, pc)) \
+ GOTO_exceptionThrown(); \
+ dvmSetField##_ftype(obj, ref, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ IPUTQ %d=0x%08llx", ref, \
+ (u8) GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+/*
+ * The JIT needs dvmDexGetResolvedField() to return non-null.
+ * Because the portable interpreter is not involved with the JIT
+ * and trace building, we only need the extra check here when this
+ * code is massaged into a stub called from an assembly interpreter.
+ * This is controlled by the JIT_STUB_HACK macro.
+ */
+
+#define HANDLE_SGET_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sget%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SGET_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/) \
+ { \
+ StaticField* sfield; \
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
+ vdst = FETCH(3); \
+ ILOGV("|sget%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ SET_REGISTER##_regsize(vdst, dvmGetStaticField##_ftype(sfield)); \
+ ILOGV("+ SGET '%s'=0x%08llx", \
+ sfield->name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(4);
+
+#define HANDLE_SPUT_X(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vAA, field@BBBB*/) \
+ { \
+ StaticField* sfield; \
+ vdst = INST_AA(inst); \
+ ref = FETCH(1); /* field ref */ \
+ ILOGV("|sput%s v%d,sfield@0x%04x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(2);
+
+#define HANDLE_SPUT_X_JUMBO(_opcode, _opname, _ftype, _regsize) \
+ HANDLE_OPCODE(_opcode /*vBBBB, class@AAAAAAAA*/) \
+ { \
+ StaticField* sfield; \
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* field ref */ \
+ vdst = FETCH(3); \
+ ILOGV("|sput%s/jumbo v%d,sfield@0x%08x", (_opname), vdst, ref); \
+ sfield = (StaticField*)dvmDexGetResolvedField(methodClassDex, ref); \
+ if (sfield == NULL) { \
+ EXPORT_PC(); \
+ sfield = dvmResolveStaticField(curMethod->clazz, ref); \
+ if (sfield == NULL) \
+ GOTO_exceptionThrown(); \
+ if (dvmDexGetResolvedField(methodClassDex, ref) == NULL) { \
+ JIT_STUB_HACK(dvmJitEndTraceSelect(self,pc)); \
+ } \
+ } \
+ dvmSetStaticField##_ftype(sfield, GET_REGISTER##_regsize(vdst)); \
+ ILOGV("+ SPUT '%s'=0x%08llx", \
+ sfield->name, (u8)GET_REGISTER##_regsize(vdst)); \
+ } \
+ FINISH(4);
+
+/* File: c/OP_BREAKPOINT.cpp */
+HANDLE_OPCODE(OP_BREAKPOINT)
+ {
+ /*
+ * Restart this instruction with the original opcode. We do
+ * this by simply jumping to the handler.
+ *
+ * It's probably not necessary to update "inst", but we do it
+ * for the sake of anything that needs to do disambiguation in a
+ * common handler with INST_INST.
+ *
+ * The breakpoint itself is handled over in updateDebugger(),
+ * because we need to detect other events (method entry, single
+ * step) and report them in the same event packet, and we're not
+ * yet handling those through breakpoint instructions. By the
+ * time we get here, the breakpoint has already been handled and
+ * the thread resumed.
+ */
+ u1 originalOpcode = dvmGetOriginalOpcode(pc);
+ LOGV("+++ break 0x%02x (0x%04x -> 0x%04x)", originalOpcode, inst,
+ INST_REPLACE_OP(inst, originalOpcode));
+ inst = INST_REPLACE_OP(inst, originalOpcode);
+ FINISH_BKPT(originalOpcode);
+ }
+OP_END
+
+/* File: c/OP_DISPATCH_FF.cpp */
+HANDLE_OPCODE(OP_DISPATCH_FF)
+ /*
+ * Indicates extended opcode. Use next 8 bits to choose where to branch.
+ */
+ DISPATCH_EXTENDED(INST_AA(inst));
+OP_END
+
+/* File: c/gotoTargets.cpp */
+/*
+ * C footer. This has some common code shared by the various targets.
+ */
+
+/*
+ * Everything from here on is a "goto target". In the basic interpreter
+ * we jump into these targets and then jump directly to the handler for
+ * next instruction. Here, these are subroutines that return to the caller.
+ */
+
+GOTO_TARGET(filledNewArray, bool methodCallRange, bool jumboFormat)
+ {
+ ClassObject* arrayClass;
+ ArrayObject* newArray;
+ u4* contents;
+ char typeCh;
+ int i;
+ u4 arg5;
+
+ EXPORT_PC();
+
+ if (jumboFormat) {
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* class ref */
+ vsrc1 = FETCH(3); /* #of elements */
+ vdst = FETCH(4); /* range base */
+ arg5 = -1; /* silence compiler warning */
+ ILOGV("|filled-new-array/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ } else {
+ ref = FETCH(1); /* class ref */
+ vdst = FETCH(2); /* first 4 regs -or- range base */
+
+ if (methodCallRange) {
+ vsrc1 = INST_AA(inst); /* #of elements */
+ arg5 = -1; /* silence compiler warning */
+ ILOGV("|filled-new-array-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ } else {
+ arg5 = INST_A(inst);
+ vsrc1 = INST_B(inst); /* #of elements */
+ ILOGV("|filled-new-array args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1, ref, vdst, arg5);
+ }
+ }
+
+ /*
+ * Resolve the array class.
+ */
+ arrayClass = dvmDexGetResolvedClass(methodClassDex, ref);
+ if (arrayClass == NULL) {
+ arrayClass = dvmResolveClass(curMethod->clazz, ref, false);
+ if (arrayClass == NULL)
+ GOTO_exceptionThrown();
+ }
+ /*
+ if (!dvmIsArrayClass(arrayClass)) {
+ dvmThrowRuntimeException(
+ "filled-new-array needs array class");
+ GOTO_exceptionThrown();
+ }
+ */
+ /* verifier guarantees this is an array class */
+ assert(dvmIsArrayClass(arrayClass));
+ assert(dvmIsClassInitialized(arrayClass));
+
+ /*
+ * Create an array of the specified type.
+ */
+ LOGVV("+++ filled-new-array type is '%s'", arrayClass->descriptor);
+ typeCh = arrayClass->descriptor[1];
+ if (typeCh == 'D' || typeCh == 'J') {
+ /* category 2 primitives not allowed */
+ dvmThrowRuntimeException("bad filled array req");
+ GOTO_exceptionThrown();
+ } else if (typeCh != 'L' && typeCh != '[' && typeCh != 'I') {
+ /* TODO: requires multiple "fill in" loops with different widths */
+ LOGE("non-int primitives not implemented");
+ dvmThrowInternalError(
+ "filled-new-array not implemented for anything but 'int'");
+ GOTO_exceptionThrown();
+ }
+
+ newArray = dvmAllocArrayByClass(arrayClass, vsrc1, ALLOC_DONT_TRACK);
+ if (newArray == NULL)
+ GOTO_exceptionThrown();
+
+ /*
+ * Fill in the elements. It's legal for vsrc1 to be zero.
+ */
+ contents = (u4*)(void*)newArray->contents;
+ if (methodCallRange) {
+ for (i = 0; i < vsrc1; i++)
+ contents[i] = GET_REGISTER(vdst+i);
+ } else {
+ assert(vsrc1 <= 5);
+ if (vsrc1 == 5) {
+ contents[4] = GET_REGISTER(arg5);
+ vsrc1--;
+ }
+ for (i = 0; i < vsrc1; i++) {
+ contents[i] = GET_REGISTER(vdst & 0x0f);
+ vdst >>= 4;
+ }
+ }
+ if (typeCh == 'L' || typeCh == '[') {
+ dvmWriteBarrierArray(newArray, 0, newArray->length);
+ }
+
+ retval.l = (Object*)newArray;
+ }
+ if (jumboFormat) {
+ FINISH(5);
+ } else {
+ FINISH(3);
+ }
+GOTO_TARGET_END
+
+
+GOTO_TARGET(invokeVirtual, bool methodCallRange, bool jumboFormat)
+ {
+ Method* baseMethod;
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ if (jumboFormat) {
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
+ vsrc1 = FETCH(3); /* count */
+ vdst = FETCH(4); /* first reg */
+ ADJUST_PC(2); /* advance pc partially to make returns easier */
+ ILOGV("|invoke-virtual/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(baseMethod->methodIndex < thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[baseMethod->methodIndex];
+
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+ self->methodToCall = methodToCall;
+ self->callsiteClass = thisPtr->clazz;
+#endif
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ /*
+ * This can happen if you create two classes, Base and Sub, where
+ * Sub is a sub-class of Base. Declare a protected abstract
+ * method foo() in Base, and invoke foo() from a method in Base.
+ * Base is an "abstract base class" and is never instantiated
+             * directly.  Now, override foo() in Sub, and use Sub.  This
+             * works fine unless Sub stops providing an implementation of
+ * the method.
+ */
+ dvmThrowAbstractMethodError("abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ base=%s.%s virtual[%d]=%s.%s",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+#if 0
+ if (vsrc1 != methodToCall->insSize) {
+ LOGW("WRONG METHOD: base=%s.%s virtual[%d]=%s.%s",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ (u4) baseMethod->methodIndex,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ //dvmDumpClass(baseMethod->clazz);
+ //dvmDumpClass(methodToCall->clazz);
+ dvmDumpAllClasses(0);
+ }
+#endif
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuper, bool methodCallRange, bool jumboFormat)
+ {
+ Method* baseMethod;
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ if (jumboFormat) {
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
+ vsrc1 = FETCH(3); /* count */
+ vdst = FETCH(4); /* first reg */
+ ADJUST_PC(2); /* advance pc partially to make returns easier */
+ ILOGV("|invoke-super/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-super-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-super args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ }
+
+ /* impossible in well-formed code, but we must check nevertheless */
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ /*
+ * Resolve the method. This is the correct method for the static
+ * type of the object. We also verify access permissions here.
+ * The first arg to dvmResolveMethod() is just the referring class
+ * (used for class loaders and such), so we don't want to pass
+ * the superclass into the resolution call.
+ */
+ baseMethod = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (baseMethod == NULL) {
+ baseMethod = dvmResolveMethod(curMethod->clazz, ref,METHOD_VIRTUAL);
+ if (baseMethod == NULL) {
+ ILOGV("+ unknown method or access denied");
+ GOTO_exceptionThrown();
+ }
+ }
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method's class.
+ *
+ * We're using the current method's class' superclass, not the
+ * superclass of "this". This is because we might be executing
+ * in a method inherited from a superclass, and we want to run
+ * in that class' superclass.
+ */
+ if (baseMethod->methodIndex >= curMethod->clazz->super->vtableCount) {
+ /*
+ * Method does not exist in the superclass. Could happen if
+ * superclass gets updated.
+ */
+ dvmThrowNoSuchMethodError(baseMethod->name);
+ GOTO_exceptionThrown();
+ }
+ methodToCall = curMethod->clazz->super->vtable[baseMethod->methodIndex];
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowAbstractMethodError("abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+ LOGVV("+++ base=%s.%s super-virtual=%s.%s",
+ baseMethod->clazz->descriptor, baseMethod->name,
+ methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeInterface, bool methodCallRange, bool jumboFormat)
+ {
+ Object* thisPtr;
+ ClassObject* thisClass;
+
+ EXPORT_PC();
+
+ if (jumboFormat) {
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
+ vsrc1 = FETCH(3); /* count */
+ vdst = FETCH(4); /* first reg */
+ ADJUST_PC(2); /* advance pc partially to make returns easier */
+ ILOGV("|invoke-interface/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-interface-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-interface args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+ thisClass = thisPtr->clazz;
+
+
+ /*
+ * Given a class and a method index, find the Method* with the
+ * actual code we want to execute.
+ */
+ methodToCall = dvmFindInterfaceMethodInCache(thisClass, ref, curMethod,
+ methodClassDex);
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+ self->callsiteClass = thisClass;
+ self->methodToCall = methodToCall;
+#endif
+ if (methodToCall == NULL) {
+ assert(dvmCheckException(self));
+ GOTO_exceptionThrown();
+ }
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeDirect, bool methodCallRange, bool jumboFormat)
+ {
+ u2 thisReg;
+
+ EXPORT_PC();
+
+ if (jumboFormat) {
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
+ vsrc1 = FETCH(3); /* count */
+ vdst = FETCH(4); /* first reg */
+ ADJUST_PC(2); /* advance pc partially to make returns easier */
+ ILOGV("|invoke-direct/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange) {
+ ILOGV("|invoke-direct-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisReg = vdst;
+ } else {
+ ILOGV("|invoke-direct args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisReg = vdst & 0x0f;
+ }
+ }
+
+ if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+ GOTO_exceptionThrown();
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref,
+ METHOD_DIRECT);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown direct method"); // should be impossible
+ GOTO_exceptionThrown();
+ }
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeStatic, bool methodCallRange, bool jumboFormat)
+ EXPORT_PC();
+
+ if (jumboFormat) {
+ ref = FETCH(1) | (u4)FETCH(2) << 16; /* method ref */
+ vsrc1 = FETCH(3); /* count */
+ vdst = FETCH(4); /* first reg */
+ ADJUST_PC(2); /* advance pc partially to make returns easier */
+ ILOGV("|invoke-static/jumbo args=%d @0x%08x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ } else {
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* method ref */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ if (methodCallRange)
+ ILOGV("|invoke-static-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ else
+ ILOGV("|invoke-static args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ }
+
+ methodToCall = dvmDexGetResolvedMethod(methodClassDex, ref);
+ if (methodToCall == NULL) {
+ methodToCall = dvmResolveMethod(curMethod->clazz, ref, METHOD_STATIC);
+ if (methodToCall == NULL) {
+ ILOGV("+ unknown method");
+ GOTO_exceptionThrown();
+ }
+
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+ /*
+ * The JIT needs dvmDexGetResolvedMethod() to return non-null.
+ * Include the check if this code is being used as a stub
+ * called from the assembly interpreter.
+ */
+ if ((self->interpBreak.ctl.subMode & kSubModeJitTraceBuild) &&
+ (dvmDexGetResolvedMethod(methodClassDex, ref) == NULL)) {
+ /* Class initialization is still ongoing */
+ dvmJitEndTraceSelect(self,pc);
+ }
+#endif
+ }
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeVirtualQuick, bool methodCallRange, bool jumboFormat)
+ {
+ Object* thisPtr;
+
+ EXPORT_PC();
+
+ vsrc1 = INST_AA(inst); /* AA (count) or BA (count + arg 5) */
+ ref = FETCH(1); /* vtable index */
+ vdst = FETCH(2); /* 4 regs -or- first reg */
+
+ /*
+ * The object against which we are executing a method is always
+ * in the first argument.
+ */
+ if (methodCallRange) {
+ assert(vsrc1 > 0);
+ ILOGV("|invoke-virtual-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+ vsrc1, ref, vdst, vdst+vsrc1-1);
+ thisPtr = (Object*) GET_REGISTER(vdst);
+ } else {
+ assert((vsrc1>>4) > 0);
+ ILOGV("|invoke-virtual-quick args=%d @0x%04x {regs=0x%04x %x}",
+ vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+ thisPtr = (Object*) GET_REGISTER(vdst & 0x0f);
+ }
+
+ if (!checkForNull(thisPtr))
+ GOTO_exceptionThrown();
+
+
+ /*
+ * Combine the object we found with the vtable offset in the
+ * method.
+ */
+ assert(ref < (unsigned int) thisPtr->clazz->vtableCount);
+ methodToCall = thisPtr->clazz->vtable[ref];
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+ self->callsiteClass = thisPtr->clazz;
+ self->methodToCall = methodToCall;
+#endif
+
+#if 0
+ if (dvmIsAbstractMethod(methodToCall)) {
+ dvmThrowAbstractMethodError("abstract method not implemented");
+ GOTO_exceptionThrown();
+ }
+#else
+ assert(!dvmIsAbstractMethod(methodToCall) ||
+ methodToCall->nativeFunc != NULL);
+#endif
+
+ LOGVV("+++ virtual[%d]=%s.%s",
+ ref, methodToCall->clazz->descriptor, methodToCall->name);
+ assert(methodToCall != NULL);
+
+ GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+ }
+GOTO_TARGET_END
+
+GOTO_TARGET(invokeSuperQuick, bool methodCallRange, bool jumboFormat)
+    {
+        u2 thisReg;                 /* register index holding "this" */
+
+        EXPORT_PC();                /* handler can throw; PC must be current */
+
+        vsrc1 = INST_AA(inst);      /* AA (count) or BA (count + arg 5) */
+        ref = FETCH(1);             /* vtable index */
+        vdst = FETCH(2);            /* 4 regs -or- first reg */
+
+        if (methodCallRange) {
+            ILOGV("|invoke-super-quick-range args=%d @0x%04x {regs=v%d-v%d}",
+                vsrc1, ref, vdst, vdst+vsrc1-1);
+            thisReg = vdst;
+        } else {
+            ILOGV("|invoke-super-quick args=%d @0x%04x {regs=0x%04x %x}",
+                vsrc1 >> 4, ref, vdst, vsrc1 & 0x0f);
+            thisReg = vdst & 0x0f;
+        }
+        /* impossible in well-formed code, but we must check nevertheless */
+        if (!checkForNull((Object*) GET_REGISTER(thisReg)))
+            GOTO_exceptionThrown();
+
+#if 0   /* impossible in optimized + verified code */
+        if (ref >= curMethod->clazz->super->vtableCount) {
+            dvmThrowNoSuchMethodError(NULL);
+            GOTO_exceptionThrown();
+        }
+#else
+        assert(ref < (unsigned int) curMethod->clazz->super->vtableCount);
+#endif
+
+        /*
+         * Combine the object we found with the vtable offset in the
+         * method's class.
+         *
+         * We're using the current method's class' superclass, not the
+         * superclass of "this".  This is because we might be executing
+         * in a method inherited from a superclass, and we want to run
+         * in the method's class' superclass.
+         */
+        methodToCall = curMethod->clazz->super->vtable[ref];
+
+#if 0
+        if (dvmIsAbstractMethod(methodToCall)) {
+            dvmThrowAbstractMethodError("abstract method not implemented");
+            GOTO_exceptionThrown();
+        }
+#else
+        assert(!dvmIsAbstractMethod(methodToCall) ||
+            methodToCall->nativeFunc != NULL);
+#endif
+        LOGVV("+++ super-virtual[%d]=%s.%s",
+            ref, methodToCall->clazz->descriptor, methodToCall->name);
+        assert(methodToCall != NULL);
+        GOTO_invokeMethod(methodCallRange, methodToCall, vsrc1, vdst);
+    }
+GOTO_TARGET_END
+
+
+ /*
+ * General handling for return-void, return, and return-wide. Put the
+ * return value in "retval" before jumping here.
+ */
+GOTO_TARGET(returnFromMethod)
+    {
+        StackSaveArea* saveArea;
+
+        /*
+         * We must do this BEFORE we pop the previous stack frame off, so
+         * that the GC can see the return value (if any) in the local vars.
+         *
+         * Since this is now an interpreter switch point, we must do it before
+         * we do anything at all.
+         */
+        PERIODIC_CHECKS(0);
+
+        ILOGV("> retval=0x%llx (leaving %s.%s %s)",
+            retval.j, curMethod->clazz->descriptor, curMethod->name,
+            curMethod->shorty);
+        //DUMP_REGS(curMethod, fp);
+
+        saveArea = SAVEAREA_FROM_FP(fp);
+
+#ifdef EASY_GDB
+        debugSaveArea = saveArea;
+#endif
+
+        /* back up to previous frame and see if we hit a break */
+        fp = (u4*)saveArea->prevFrame;
+        assert(fp != NULL);
+
+        /* Handle any special subMode requirements */
+        if (self->interpBreak.ctl.subMode != 0) {
+            PC_FP_TO_SELF();
+            dvmReportReturn(self);
+        }
+
+        if (dvmIsBreakFrame(fp)) {
+            /* bail without popping the method frame from stack */
+            LOGVV("+++ returned into break frame");
+            GOTO_bail();
+        }
+
+        /* update thread FP, and reset local variables */
+        self->interpSave.curFrame = fp;
+        curMethod = SAVEAREA_FROM_FP(fp)->method;
+        self->interpSave.method = curMethod;
+        //methodClass = curMethod->clazz;
+        methodClassDex = curMethod->clazz->pDvmDex;
+        pc = saveArea->savedPc;
+        ILOGD("> (return to %s.%s %s)", curMethod->clazz->descriptor,
+            curMethod->name, curMethod->shorty);
+
+        /* use FINISH on the caller's invoke instruction */
+        //u2 invokeInstr = INST_INST(FETCH(0));
+        if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+            invokeInstr <= OP_INVOKE_INTERFACE*/)
+        {
+            FINISH(3);              /* invoke-* instructions are 3 code units */
+        } else {
+            //LOGE("Unknown invoke instr %02x at %d",
+            //    invokeInstr, (int) (pc - curMethod->insns));
+            assert(false);
+        }
+    }
+GOTO_TARGET_END
+
+
+ /*
+ * Jump here when the code throws an exception.
+ *
+ * By the time we get here, the Throwable has been created and the stack
+ * trace has been saved off.
+ */
+GOTO_TARGET(exceptionThrown)
+    {
+        Object* exception;
+        int catchRelPc;
+
+        PERIODIC_CHECKS(0);
+
+        /*
+         * We save off the exception and clear the exception status.  While
+         * processing the exception we might need to load some Throwable
+         * classes, and we don't want class loader exceptions to get
+         * confused with this one.
+         */
+        assert(dvmCheckException(self));
+        exception = dvmGetException(self);
+        dvmAddTrackedAlloc(exception, self);    /* keep it live while detached from self */
+        dvmClearException(self);
+
+        LOGV("Handling exception %s at %s:%d",
+            exception->clazz->descriptor, curMethod->name,
+            dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+
+        /*
+         * Report the exception throw to any "subMode" watchers.
+         *
+         * TODO: if the exception was thrown by interpreted code, control
+         * fell through native, and then back to us, we will report the
+         * exception at the point of the throw and again here.  We can avoid
+         * this by not reporting exceptions when we jump here directly from
+         * the native call code above, but then we won't report exceptions
+         * that were thrown *from* the JNI code (as opposed to *through* it).
+         *
+         * The correct solution is probably to ignore from-native exceptions
+         * here, and have the JNI exception code do the reporting to the
+         * debugger.
+         */
+        if (self->interpBreak.ctl.subMode != 0) {
+            PC_FP_TO_SELF();
+            dvmReportExceptionThrow(self, exception);
+        }
+
+        /*
+         * We need to unroll to the catch block or the nearest "break"
+         * frame.
+         *
+         * A break frame could indicate that we have reached an intermediate
+         * native call, or have gone off the top of the stack and the thread
+         * needs to exit.  Either way, we return from here, leaving the
+         * exception raised.
+         *
+         * If we do find a catch block, we want to transfer execution to
+         * that point.
+         *
+         * Note this can cause an exception while resolving classes in
+         * the "catch" blocks.
+         */
+        catchRelPc = dvmFindCatchBlock(self, pc - curMethod->insns,
+                    exception, false, (void**)(void*)&fp);  /* on success fp -> catch frame */
+
+        /*
+         * Restore the stack bounds after an overflow.  This isn't going to
+         * be correct in all circumstances, e.g. if JNI code devours the
+         * exception this won't happen until some other exception gets
+         * thrown.  If the code keeps pushing the stack bounds we'll end
+         * up aborting the VM.
+         *
+         * Note we want to do this *after* the call to dvmFindCatchBlock,
+         * because that may need extra stack space to resolve exception
+         * classes (e.g. through a class loader).
+         *
+         * It's possible for the stack overflow handling to cause an
+         * exception (specifically, class resolution in a "catch" block
+         * during the call above), so we could see the thread's overflow
+         * flag raised but actually be running in a "nested" interpreter
+         * frame.  We don't allow doubled-up StackOverflowErrors, so
+         * we can check for this by just looking at the exception type
+         * in the cleanup function.  Also, we won't unroll past the SOE
+         * point because the more-recent exception will hit a break frame
+         * as it unrolls to here.
+         */
+        if (self->stackOverflowed)
+            dvmCleanupStackOverflow(self, exception);
+
+        if (catchRelPc < 0) {
+            /* falling through to JNI code or off the bottom of the stack */
+#if DVM_SHOW_EXCEPTION >= 2
+            LOGD("Exception %s from %s:%d not caught locally",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+                dvmLineNumFromPC(curMethod, pc - curMethod->insns));
+#endif
+            dvmSetException(self, exception);
+            dvmReleaseTrackedAlloc(exception, self);
+            GOTO_bail();
+        }
+
+#if DVM_SHOW_EXCEPTION >= 3
+        {
+            const Method* catchMethod = SAVEAREA_FROM_FP(fp)->method;
+            LOGD("Exception %s thrown from %s:%d to %s:%d",
+                exception->clazz->descriptor, dvmGetMethodSourceFile(curMethod),
+                dvmLineNumFromPC(curMethod, pc - curMethod->insns),
+                dvmGetMethodSourceFile(catchMethod),
+                dvmLineNumFromPC(catchMethod, catchRelPc));
+        }
+#endif
+
+        /*
+         * Adjust local variables to match self->interpSave.curFrame and the
+         * updated PC.
+         */
+        //fp = (u4*) self->interpSave.curFrame;
+        curMethod = SAVEAREA_FROM_FP(fp)->method;
+        self->interpSave.method = curMethod;
+        //methodClass = curMethod->clazz;
+        methodClassDex = curMethod->clazz->pDvmDex;
+        pc = curMethod->insns + catchRelPc;
+        ILOGV("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+            curMethod->name, curMethod->shorty);
+        DUMP_REGS(curMethod, fp, false);            // show all regs
+
+        /*
+         * Restore the exception if the handler wants it.
+         *
+         * The Dalvik spec mandates that, if an exception handler wants to
+         * do something with the exception, the first instruction executed
+         * must be "move-exception".  We can pass the exception along
+         * through the thread struct, and let the move-exception instruction
+         * clear it for us.
+         *
+         * If the handler doesn't call move-exception, we don't want to
+         * finish here with an exception still pending.
+         */
+        if (INST_INST(FETCH(0)) == OP_MOVE_EXCEPTION)
+            dvmSetException(self, exception);
+
+        dvmReleaseTrackedAlloc(exception, self);
+        FINISH(0);
+    }
+GOTO_TARGET_END
+
+
+
+ /*
+ * General handling for invoke-{virtual,super,direct,static,interface},
+ * including "quick" variants.
+ *
+ * Set "methodToCall" to the Method we're calling, and "methodCallRange"
+ * depending on whether this is a "/range" instruction.
+ *
+ * For a range call:
+ * "vsrc1" holds the argument count (8 bits)
+ * "vdst" holds the first argument in the range
+ * For a non-range call:
+ * "vsrc1" holds the argument count (4 bits) and the 5th argument index
+ * "vdst" holds four 4-bit register indices
+ *
+ * The caller must EXPORT_PC before jumping here, because any method
+ * call can throw a stack overflow exception.
+ */
+GOTO_TARGET(invokeMethod, bool methodCallRange, const Method* _methodToCall,
+    u2 count, u2 regs)
+    {
+        STUB_HACK(vsrc1 = count; vdst = regs; methodToCall = _methodToCall;);  /* stub builds: latch args into interp-state vars */
+
+        //printf("range=%d call=%p count=%d regs=0x%04x\n",
+        //    methodCallRange, methodToCall, count, regs);
+        //printf(" --> %s.%s %s\n", methodToCall->clazz->descriptor,
+        //    methodToCall->name, methodToCall->shorty);
+
+        u4* outs;
+        int i;
+
+        /*
+         * Copy args.  This may corrupt vsrc1/vdst.
+         */
+        if (methodCallRange) {
+            // could use memcpy or a "Duff's device"; most functions have
+            // so few args it won't matter much
+            assert(vsrc1 <= curMethod->outsSize);
+            assert(vsrc1 == methodToCall->insSize);
+            outs = OUTS_FROM_FP(fp, vsrc1);
+            for (i = 0; i < vsrc1; i++)
+                outs[i] = GET_REGISTER(vdst+i);
+        } else {
+            u4 count = vsrc1 >> 4;
+
+            assert(count <= curMethod->outsSize);
+            assert(count == methodToCall->insSize);
+            assert(count <= 5);
+
+            outs = OUTS_FROM_FP(fp, count);
+#if 0
+            if (count == 5) {
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);
+                count--;
+            }
+            for (i = 0; i < (int) count; i++) {
+                outs[i] = GET_REGISTER(vdst & 0x0f);
+                vdst >>= 4;
+            }
+#else
+            // This version executes fewer instructions but is larger
+            // overall.  Seems to be a teensy bit faster.
+            assert((vdst >> 16) == 0); // 16 bits -or- high 16 bits clear
+            switch (count) {
+            case 5:
+                outs[4] = GET_REGISTER(vsrc1 & 0x0f);   /* fall through */
+            case 4:
+                outs[3] = GET_REGISTER(vdst >> 12);     /* fall through */
+            case 3:
+                outs[2] = GET_REGISTER((vdst & 0x0f00) >> 8);   /* fall through */
+            case 2:
+                outs[1] = GET_REGISTER((vdst & 0x00f0) >> 4);   /* fall through */
+            case 1:
+                outs[0] = GET_REGISTER(vdst & 0x0f);
+            default:
+                ;
+            }
+#endif
+        }
+    }
+
+    /*
+     * (This was originally a "goto" target; I've kept it separate from the
+     * stuff above in case we want to refactor things again.)
+     *
+     * At this point, we have the arguments stored in the "outs" area of
+     * the current method's stack frame, and the method to call in
+     * "methodToCall".  Push a new stack frame.
+     */
+    {
+        StackSaveArea* newSaveArea;
+        u4* newFp;
+
+        ILOGV("> %s%s.%s %s",
+            dvmIsNativeMethod(methodToCall) ? "(NATIVE) " : "",
+            methodToCall->clazz->descriptor, methodToCall->name,
+            methodToCall->shorty);
+
+        newFp = (u4*) SAVEAREA_FROM_FP(fp) - methodToCall->registersSize;
+        newSaveArea = SAVEAREA_FROM_FP(newFp);
+
+        /* verify that we have enough space */
+        if (true) {
+            u1* bottom;
+            bottom = (u1*) newSaveArea - methodToCall->outsSize * sizeof(u4);
+            if (bottom < self->interpStackEnd) {
+                /* stack overflow */
+                LOGV("Stack overflow on method call (start=%p end=%p newBot=%p(%d) size=%d '%s')",
+                    self->interpStackStart, self->interpStackEnd, bottom,
+                    (u1*) fp - bottom, self->interpStackSize,
+                    methodToCall->name);
+                dvmHandleStackOverflow(self, methodToCall);
+                assert(dvmCheckException(self));
+                GOTO_exceptionThrown();
+            }
+            //LOGD("+++ fp=%p newFp=%p newSave=%p bottom=%p",
+            //    fp, newFp, newSaveArea, bottom);
+        }
+
+#ifdef LOG_INSTR
+        if (methodToCall->registersSize > methodToCall->insSize) {
+            /*
+             * This makes valgrind quiet when we print registers that
+             * haven't been initialized.  Turn it off when the debug
+             * messages are disabled -- we want valgrind to report any
+             * used-before-initialized issues.
+             */
+            memset(newFp, 0xcc,
+                (methodToCall->registersSize - methodToCall->insSize) * 4);
+        }
+#endif
+
+#ifdef EASY_GDB
+        newSaveArea->prevSave = SAVEAREA_FROM_FP(fp);
+#endif
+        newSaveArea->prevFrame = fp;
+        newSaveArea->savedPc = pc;
+#if defined(WITH_JIT) && defined(MTERP_STUB)
+        newSaveArea->returnAddr = 0;
+#endif
+        newSaveArea->method = methodToCall;
+
+        if (self->interpBreak.ctl.subMode != 0) {
+            /*
+             * We mark ENTER here for both native and non-native
+             * calls.  For native calls, we'll mark EXIT on return.
+             * For non-native calls, EXIT is marked in the RETURN op.
+             */
+            PC_TO_SELF();
+            dvmReportInvoke(self, methodToCall);
+        }
+
+        if (!dvmIsNativeMethod(methodToCall)) {
+            /*
+             * "Call" interpreted code.  Reposition the PC, update the
+             * frame pointer and other local state, and continue.
+             */
+            curMethod = methodToCall;
+            self->interpSave.method = curMethod;
+            methodClassDex = curMethod->clazz->pDvmDex;
+            pc = methodToCall->insns;
+            self->interpSave.curFrame = newFp;
+            fp = newFp;
+#ifdef EASY_GDB
+            debugSaveArea = SAVEAREA_FROM_FP(newFp);
+#endif
+            self->debugIsMethodEntry = true;        // profiling, debugging
+            ILOGD("> pc <-- %s.%s %s", curMethod->clazz->descriptor,
+                curMethod->name, curMethod->shorty);
+            DUMP_REGS(curMethod, fp, true);         // show input args
+            FINISH(0);                              // jump to method start
+        } else {
+            /* set this up for JNI locals, even if not a JNI native */
+            newSaveArea->xtra.localRefCookie = self->jniLocalRefTable.segmentState.all;
+
+            self->interpSave.curFrame = newFp;
+
+            DUMP_REGS(methodToCall, newFp, true);   // show input args
+
+            if (self->interpBreak.ctl.subMode != 0) {
+                dvmReportPreNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
+            }
+
+            ILOGD("> native <-- %s.%s %s", methodToCall->clazz->descriptor,
+                  methodToCall->name, methodToCall->shorty);
+
+            /*
+             * Jump through native call bridge.  Because we leave no
+             * space for locals on native calls, "newFp" points directly
+             * to the method arguments.
+             */
+            (*methodToCall->nativeFunc)(newFp, &retval, methodToCall, self);
+
+            if (self->interpBreak.ctl.subMode != 0) {
+                dvmReportPostNativeInvoke(methodToCall, self, newSaveArea->prevFrame);
+            }
+
+            /* pop frame off */
+            dvmPopJniLocals(self, newSaveArea);
+            self->interpSave.curFrame = newSaveArea->prevFrame;
+            fp = newSaveArea->prevFrame;
+
+            /*
+             * If the native code threw an exception, or interpreted code
+             * invoked by the native call threw one and nobody has cleared
+             * it, jump to our local exception handling.
+             */
+            if (dvmCheckException(self)) {
+                LOGV("Exception thrown by/below native code");
+                GOTO_exceptionThrown();
+            }
+
+            ILOGD("> retval=0x%llx (leaving native)", retval.j);
+            ILOGD("> (return from native %s.%s to %s.%s %s)",
+                methodToCall->clazz->descriptor, methodToCall->name,
+                curMethod->clazz->descriptor, curMethod->name,
+                curMethod->shorty);
+
+            //u2 invokeInstr = INST_INST(FETCH(0));
+            if (true /*invokeInstr >= OP_INVOKE_VIRTUAL &&
+                invokeInstr <= OP_INVOKE_INTERFACE*/)
+            {
+                FINISH(3);          /* invoke-* instructions are 3 code units */
+            } else {
+                //LOGE("Unknown invoke instr %02x at %d",
+                //    invokeInstr, (int) (pc - curMethod->insns));
+                assert(false);
+            }
+        }
+    }
+    assert(false);      // should not get here
+GOTO_TARGET_END
+
+/* File: cstubs/enddefs.cpp */
+
+/* undefine "magic" name remapping */
+#undef retval
+#undef pc
+#undef fp
+#undef curMethod
+#undef methodClassDex
+#undef self
+#undef debugTrackedRefStart
+
+/* File: mips/debug.cpp */
+#include <inttypes.h>
+
+/*
+ * Dump the fixed-purpose MIPS registers, along with some other info.
+ *
+ */
+void dvmMterpDumpMipsRegs(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3)
+{
+    register uint32_t rPC       asm("s0");  /* mterp fixed-role regs (s0-s7); */
+    register uint32_t rFP       asm("s1");  /* values only meaningful when */
+    register uint32_t rSELF     asm("s2");  /* called from the mterp core -- */
+    register uint32_t rIBASE    asm("s3");  /* TODO confirm vs mips-defines.h */
+    register uint32_t rINST     asm("s4");
+    register uint32_t rOBJ      asm("s5");
+    register uint32_t rBIX      asm("s6");
+    register uint32_t rTEMP     asm("s7");
+
+    //extern char dvmAsmInstructionStart[];
+
+    printf("REGS: a0=%08x a1=%08x a2=%08x a3=%08x\n", a0, a1, a2, a3);
+    printf("    : rPC=%08x rFP=%08x rSELF=%08x rIBASE=%08x\n",
+        rPC, rFP, rSELF, rIBASE);
+    printf("    : rINST=%08x rOBJ=%08x rBIX=%08x rTEMP=%08x \n", rINST, rOBJ, rBIX, rTEMP);
+
+    //Thread* self = (Thread*) rSELF;
+    //const Method* method = self->method;
+    printf("    + self is %p\n", dvmThreadSelf());
+    //printf("    + currently in %s.%s %s\n",
+    //    method->clazz->descriptor, method->name, method->signature);
+    //printf("    + dvmAsmInstructionStart = %p\n", dvmAsmInstructionStart);
+    //printf("    + next handler for 0x%02x = %p\n",
+    //    rINST & 0xff, dvmAsmInstructionStart + (rINST & 0xff) * 64);
+}
+
+/*
+ * Dump the StackSaveArea for the specified frame pointer.
+ */
+void dvmDumpFp(void* fp, StackSaveArea* otherSaveArea)
+{
+    StackSaveArea* saveArea = SAVEAREA_FROM_FP(fp);
+    printf("StackSaveArea for fp %p [%p/%p]:\n", fp, saveArea, otherSaveArea);
+#ifdef EASY_GDB
+    /* EASY_GDB builds keep an explicit prevSave chain in the save area */
+    printf("  prevSave=%p, prevFrame=%p savedPc=%p meth=%p curPc=%p\n",
+        saveArea->prevSave, saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc);
+#else
+    printf("  prevFrame=%p savedPc=%p meth=%p curPc=%p fp[0]=0x%08x\n",
+        saveArea->prevFrame, saveArea->savedPc,
+        saveArea->method, saveArea->xtra.currentPc,
+        *(u4*)fp);
+#endif
+}
+
+/*
+ * Does the bulk of the work for common_printMethod().
+ */
+void dvmMterpPrintMethod(Method* method)
+{
+    /*
+     * It is a direct (non-virtual) method if it is static, private,
+     * or a constructor.
+     */
+    bool isDirect =
+        ((method->accessFlags & (ACC_STATIC|ACC_PRIVATE)) != 0) ||
+        (method->name[0] == '<');
+
+    char* desc = dexProtoCopyMethodDescriptor(&method->prototype);
+
+    printf("<%c:%s.%s %s> ",
+            isDirect ? 'D' : 'V',
+            method->clazz->descriptor,
+            method->name,
+            desc);
+
+    free(desc);     /* dexProtoCopyMethodDescriptor allocates; we own it */
+}
+
diff --git a/vm/mterp/out/InterpC-portable.cpp b/vm/mterp/out/InterpC-portable.cpp
index 751184f0b..0c80873ba 100644
--- a/vm/mterp/out/InterpC-portable.cpp
+++ b/vm/mterp/out/InterpC-portable.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/out/InterpC-x86-atom.cpp b/vm/mterp/out/InterpC-x86-atom.cpp
index b64c8a3a2..399a91880 100644
--- a/vm/mterp/out/InterpC-x86-atom.cpp
+++ b/vm/mterp/out/InterpC-x86-atom.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/out/InterpC-x86.cpp b/vm/mterp/out/InterpC-x86.cpp
index 2dd7f0861..529bcb185 100644
--- a/vm/mterp/out/InterpC-x86.cpp
+++ b/vm/mterp/out/InterpC-x86.cpp
@@ -68,6 +68,14 @@
#if defined(__ARM_EABI__)
# define NO_UNALIGN_64__UNION
#endif
+/*
+ * MIPS ABI requires 64-bit alignment for access to 64-bit data types.
+ *
+ * Use memcpy() to do the transfer
+ */
+#if defined(__mips__)
+/* # define NO_UNALIGN_64__UNION */
+#endif
//#define LOG_INSTR /* verbose debugging */
diff --git a/vm/mterp/rebuild.sh b/vm/mterp/rebuild.sh
index 201432432..e550a5d35 100755
--- a/vm/mterp/rebuild.sh
+++ b/vm/mterp/rebuild.sh
@@ -20,7 +20,7 @@
#
set -e
-for arch in portable allstubs armv5te armv5te-vfp armv7-a armv7-a-neon x86 x86-atom; do TARGET_ARCH_EXT=$arch make -f Makefile-mterp; done
+for arch in portable allstubs armv5te armv5te-vfp armv7-a armv7-a-neon mips x86 x86-atom; do TARGET_ARCH_EXT=$arch make -f Makefile-mterp; done
# These aren't actually used, so just go ahead and remove them. The correct
# approach is to prevent them from being generated in the first place, but