-rw-r--r--  compiler/Android.mk | 8
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 3
-rw-r--r--  compiler/dex/quick/gen_common.cc | 7
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc | 35
-rw-r--r--  compiler/dex/quick/mips64/assemble_mips64.cc | 898
-rw-r--r--  compiler/dex/quick/mips64/backend_mips64.h | 32
-rw-r--r--  compiler/dex/quick/mips64/call_mips64.cc | 421
-rw-r--r--  compiler/dex/quick/mips64/codegen_mips64.h | 328
-rw-r--r--  compiler/dex/quick/mips64/fp_mips64.cc | 253
-rw-r--r--  compiler/dex/quick/mips64/int_mips64.cc | 694
-rw-r--r--  compiler/dex/quick/mips64/mips64_lir.h | 648
-rw-r--r--  compiler/dex/quick/mips64/target_mips64.cc | 653
-rw-r--r--  compiler/dex/quick/mips64/utility_mips64.cc | 875
-rw-r--r--  compiler/dex/quick/quick_compiler.cc | 26
-rw-r--r--  compiler/dex/quick/ralloc_util.cc | 2
-rw-r--r--  compiler/driver/compiler_driver.cc | 6
-rw-r--r--  compiler/jni/quick/calling_convention.cc | 5
-rw-r--r--  compiler/jni/quick/jni_compiler.cc | 4
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.cc | 201
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.h | 91
-rw-r--r--  dex2oat/dex2oat.cc | 5
-rw-r--r--  disassembler/disassembler_mips64.cc | 128
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 705
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 2
24 files changed, 5878 insertions, 152 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 86a27c1b57..090675356f 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -48,6 +48,12 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/mips/int_mips.cc \
dex/quick/mips/target_mips.cc \
dex/quick/mips/utility_mips.cc \
+ dex/quick/mips64/assemble_mips64.cc \
+ dex/quick/mips64/call_mips64.cc \
+ dex/quick/mips64/fp_mips64.cc \
+ dex/quick/mips64/int_mips64.cc \
+ dex/quick/mips64/target_mips64.cc \
+ dex/quick/mips64/utility_mips64.cc \
dex/quick/mir_to_lir.cc \
dex/quick/quick_compiler.cc \
dex/quick/ralloc_util.cc \
@@ -83,6 +89,7 @@ LIBART_COMPILER_SRC_FILES := \
jni/quick/arm/calling_convention_arm.cc \
jni/quick/arm64/calling_convention_arm64.cc \
jni/quick/mips/calling_convention_mips.cc \
+ jni/quick/mips64/calling_convention_mips64.cc \
jni/quick/x86/calling_convention_x86.cc \
jni/quick/x86_64/calling_convention_x86_64.cc \
jni/quick/calling_convention.cc \
@@ -154,6 +161,7 @@ LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
dex/quick/arm/arm_lir.h \
dex/quick/arm64/arm64_lir.h \
dex/quick/mips/mips_lir.h \
+ dex/quick/mips64/mips64_lir.h \
dex/quick/resource_mask.h \
dex/compiler_enums.h \
dex/global_value_numbering.h \
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 0bac511dd1..029c0ca8c0 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -541,6 +541,7 @@ void Mir2Lir::InstallSwitchTables() {
break;
case kArm64:
case kMips:
+ case kMips64:
bx_offset = tab_rec->anchor->offset;
break;
default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
@@ -1203,6 +1204,7 @@ void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType t
LIR* load_pc_rel = OpPcRelLoad(TargetPtrReg(symbolic_reg), data_target);
AppendLIR(load_pc_rel);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
+ DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
}
void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
@@ -1220,6 +1222,7 @@ void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType
LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
AppendLIR(load_pc_rel);
DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
+ DCHECK_NE(cu_->instruction_set, kMips64) << reinterpret_cast<void*>(data_target);
}
void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index afae89d150..e57889aeb7 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -482,6 +482,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
r_val = AllocTemp();
break;
case kMips:
+ case kMips64:
r_val = AllocTemp();
break;
default: LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
@@ -1695,7 +1696,8 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
StoreValue(rl_dest, rl_result);
} else {
bool done = false; // Set to true if we happen to find a way to use a real instruction.
- if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
+ cu_->instruction_set == kArm64) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero && (flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
@@ -1990,7 +1992,8 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
}
bool done = false;
- if (cu_->instruction_set == kMips || cu_->instruction_set == kArm64) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64 ||
+ cu_->instruction_set == kArm64) {
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
done = true;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 01f1d375ed..6b553fd181 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -222,7 +222,8 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampo
RegLocation arg0, RegLocation arg1,
bool safepoint_pc) {
RegStorage r_tgt = CallHelperSetup(trampoline);
- if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
+ if (cu_->instruction_set == kArm64 || cu_->instruction_set == kMips64 ||
+ cu_->instruction_set == kX86_64) {
RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);
RegStorage arg1_reg;
@@ -900,8 +901,8 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
}
bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
@@ -1028,8 +1029,8 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
// dst = src.length();
@@ -1060,8 +1061,8 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
}
bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation.
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info); // result reg
@@ -1195,8 +1196,8 @@ bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
}
bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
RegLocation rl_dest = InlineTarget(info);
@@ -1210,8 +1211,8 @@ bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
}
bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
RegLocation rl_dest = InlineTargetWide(info);
@@ -1281,8 +1282,8 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
/* Fast string.compareTo(Ljava/lang/string;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
ClobberCallerSave();
@@ -1336,8 +1337,8 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
bool is_long, bool is_volatile) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
// Unused - RegLocation rl_src_unsafe = info->args[0];
@@ -1381,8 +1382,8 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
bool is_object, bool is_volatile, bool is_ordered) {
- if (cu_->instruction_set == kMips) {
- // TODO - add Mips implementation
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
+ // TODO: add Mips and Mips64 implementations.
return false;
}
// Unused - RegLocation rl_src_unsafe = info->args[0];
diff --git a/compiler/dex/quick/mips64/assemble_mips64.cc b/compiler/dex/quick/mips64/assemble_mips64.cc
new file mode 100644
index 0000000000..17a0ef1bb4
--- /dev/null
+++ b/compiler/dex/quick/mips64/assemble_mips64.cc
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/compiler_ir.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+#define MAX_ASSEMBLER_RETRIES 50
+
+/*
+ * opcode: Mips64OpCode enum
+ * skeleton: pre-designated bit-pattern for this opcode
+ * k0: key to applying ds/de
+ * ds: dest start bit position
+ * de: dest end bit position
+ * k1: key to applying s1s/s1e
+ * s1s: src1 start bit position
+ * s1e: src1 end bit position
+ * k2: key to applying s2s/s2e
+ * s2s: src2 start bit position
+ * s2e: src2 end bit position
+ * operands: number of operands (for sanity check purposes)
+ * name: mnemonic name
+ * fmt: for pretty-printing
+ */
+#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
+ k3, k3s, k3e, flags, name, fmt, size) \
+ {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
+ {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
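+// As an illustration, the kMips64Addu entry in the table below expands to the
+// initializer:
+//   {0x00000021, {{kFmtBitBlt, 15, 11}, {kFmtBitBlt, 25, 21}, {kFmtBitBlt, 20, 16},
+//    {kFmtUnused, -1, -1}}, kMips64Addu, IS_TERTIARY_OP | REG_DEF0_USE12,
+//    "addu", "!0r,!1r,!2r", 4}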
+
+/* Instruction dump string format keys: !pf, where "!" is the start
+ * of the key, "p" is which numeric operand to use and "f" is the
+ * print format.
+ *
+ * [p]ositions:
+ * 0 -> operands[0] (dest)
+ * 1 -> operands[1] (src1)
+ * 2 -> operands[2] (src2)
+ * 3 -> operands[3] (extra)
+ *
+ * [f]ormats:
+ * h -> 4-digit hex
+ * d -> decimal
+ * E -> decimal*4
+ * F -> decimal*2
+ * c -> branch condition (beq, bne, etc.)
+ * t -> pc-relative target
+ * T -> pc-region target
+ * u -> 1st half of bl[x] target
+ * v -> 2nd half of bl[x] target
+ * R -> register list
+ * s -> single precision floating point register
+ * S -> double precision floating point register
+ * m -> Thumb2 modified immediate
+ * n -> complemented Thumb2 modified immediate
+ * M -> Thumb2 16-bit zero-extended immediate
+ * b -> 4-digit binary
+ * N -> append a NOP
+ *
+ * [!] escape. To insert "!", use "!!"
+ */
+/* NOTE: must be kept in sync with enum Mips64Opcode from mips64_lir.h */
+/*
+ * TUNING: We're currently punting on the branch delay slots. All branch
+ * instructions in this map are given a size of 8, which during assembly
+ * is expanded to include a nop. This scheme should be replaced with
+ * an assembler pass to fill those slots when possible.
+ */
+const Mips64EncodingMap Mips64Mir2Lir::EncodingMap[kMips64Last] = {
+ ENCODING_MAP(kMips6432BitData, 0x00000000,
+ kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "data", "0x!0h(!0d)", 4),
+ ENCODING_MAP(kMips64Addiu, 0x24000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "addiu", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Addu, 0x00000021,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "addu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64And, 0x00000024,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "and", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Andi, 0x30000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "andi", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64B, 0x10000000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
+ "b", "!0t!0N", 8),
+ ENCODING_MAP(kMips64Bal, 0x04110000,
+ kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
+ NEEDS_FIXUP, "bal", "!0t!0N", 8),
+ ENCODING_MAP(kMips64Beq, 0x10000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+ NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
+ ENCODING_MAP(kMips64Beqz, 0x10000000, // Same as beq above with t = $zero.
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bgez, 0x04010000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bgtz, 0x1c000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Blez, 0x18000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bltz, 0x04000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bnez, 0x14000000, // Same as bne below with t = $zero.
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
+ NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
+ ENCODING_MAP(kMips64Bne, 0x14000000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
+ NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
+ ENCODING_MAP(kMips64Break, 0x0000000d,
+ kFmtBitBlt, 25, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP, "break", "!0d", 4),
+ ENCODING_MAP(kMips64Daddiu, 0x64000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Daddu, 0x0000002d,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "daddu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dahi, 0x04060000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dahi", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Dati, 0x041E0000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dati", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Daui, 0x74000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daui", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Ddiv, 0x0000009e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "ddiv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Div, 0x0000009a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmod, 0x000000de,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dmod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmul, 0x0000009c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dmul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmfc1, 0x44200000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "dmfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "dmtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll, 0x00000038,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll32, 0x0000003c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl, 0x0000003a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra, 0x0000003b,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra32, 0x0000003f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsllv, 0x00000014,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsllv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrlv, 0x00000016,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrlv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrav, 0x00000017,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrav", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsubu, 0x0000002f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsubu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Ext, 0x7c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
+ kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
+ "ext", "!0r,!1r,!2d,!3D", 4),
+ ENCODING_MAP(kMips64Faddd, 0x46200000,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fadds, 0x46000000,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fdivd, 0x46200003,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fdivs, 0x46000003,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fmuld, 0x46200002,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fmuls, 0x46000002,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fsubd, 0x46200001,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMips64Fsubs, 0x46000001,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMips64Fcvtsd, 0x46200020,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMips64Fcvtsw, 0x46800020,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.w", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtds, 0x46000021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.s", "!0S,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtdw, 0x46800021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.w", "!0S,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtws, 0x46000024,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fcvtwd, 0x46200024,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMips64Fmovd, 0x46200006,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMips64Fmovs, 0x46000006,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fnegd, 0x46200007,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMips64Fnegs, 0x46000007,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMips64Fldc1, 0xd4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ldc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Flwc1, 0xc4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Fsdc1, 0xf4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sdc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Fswc1, 0xe4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "swc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Jal, 0x0c000000,
+ kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
+ "jal", "!0T(!0E)!0N", 8),
+ ENCODING_MAP(kMips64Jalr, 0x00000009,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
+ "jalr", "!0r,!1r!0N", 8),
+ ENCODING_MAP(kMips64Lahi, 0x3c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "lahi/lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Lalo, 0x34000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Lb, 0x80000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lb", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lbu, 0x90000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lbu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Ld, 0xdc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ld", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lh, 0x84000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lh", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lhu, 0x94000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lhu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lui, 0x3c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
+ "lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Lw, 0x8c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lwu, 0x9c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Mfc1, 0x44000000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Mtc1, 0x44800000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "mtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Move, 0x0000002d, // Or using zero reg.
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "move", "!0r,!1r", 4),
+ ENCODING_MAP(kMips64Mod, 0x000000da,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Mul, 0x00000098,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Nop, 0x00000000,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "nop", ";", 4),
+ ENCODING_MAP(kMips64Nor, 0x00000027, // Used for "not" too.
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "nor", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Or, 0x00000025,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "or", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Ori, 0x34000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "ori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Sb, 0xa0000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sb", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sd, 0xfc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sd", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Seb, 0x7c000420,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "seb", "!0r,!1r", 4),
+ ENCODING_MAP(kMips64Seh, 0x7c000620,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "seh", "!0r,!1r", 4),
+ ENCODING_MAP(kMips64Sh, 0xa4000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sh", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sll, 0x00000000,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "sll", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Sllv, 0x00000004,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sllv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Slt, 0x0000002a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "slt", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Slti, 0x28000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "slti", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Sltu, 0x0000002b,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sltu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Sra, 0x00000003,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "sra", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Srav, 0x00000007,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "srav", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Srl, 0x00000002,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "srl", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Srlv, 0x00000006,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "srlv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Subu, 0x00000023, // Used for "neg" too.
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "subu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Sw, 0xac000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sync, 0x0000000f,
+ kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "sync", ";", 4),
+ ENCODING_MAP(kMips64Xor, 0x00000026,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "xor", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Xori, 0x38000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "xori", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64CurrPC, 0x04110001,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
+ "addiu", "ra,pc,8", 4),
+ ENCODING_MAP(kMips64Delta, 0x67e00000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
+ NEEDS_FIXUP, "daddiu", "!0r,ra,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64DeltaHi, 0x3c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
+ "lui", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64DeltaLo, 0x34000000,
+ kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
+ "ori", "!0r,!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Undefined, 0x64000000,
+ kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, NO_OPERAND,
+ "undefined", "", 4),
+};
+
+
+/*
+ * Convert a short-form branch to long form. Hopefully, this won't happen
+ * very often because the PIC sequence is especially unfortunate.
+ *
+ * Orig conditional branch
+ * -----------------------
+ * beq rs,rt,target
+ *
+ * Long conditional branch
+ * -----------------------
+ * bne rs,rt,hop
+ * bal .+8 ; rRA <- anchor
+ * lui rAT, ((target-anchor) >> 16)
+ * anchor:
+ * ori rAT, rAT, ((target-anchor) & 0xffff)
+ * addu rAT, rAT, rRA
+ * jalr rZERO, rAT
+ * hop:
+ *
+ * Orig unconditional branch
+ * -------------------------
+ * b target
+ *
+ * Long unconditional branch
+ * -----------------------
+ * bal .+8 ; rRA <- anchor
+ * lui rAT, ((target-anchor) >> 16)
+ * anchor:
+ * ori rAT, rAT, ((target-anchor) & 0xffff)
+ * addu rAT, rAT, rRA
+ * jalr rZERO, rAT
+ *
+ *
+ * NOTE: An out-of-range bal isn't supported because it should
+ * never happen with the current PIC model.
+ */
+void Mips64Mir2Lir::ConvertShortToLongBranch(LIR* lir) {
+ // For conditional branches we'll need to reverse the sense
+ bool unconditional = false;
+ int opcode = lir->opcode;
+ int dalvik_offset = lir->dalvik_offset;
+ switch (opcode) {
+ case kMips64Bal:
+ LOG(FATAL) << "long branch and link unsupported";
+ UNREACHABLE();
+ case kMips64B:
+ unconditional = true;
+ break;
+ case kMips64Beq: opcode = kMips64Bne; break;
+ case kMips64Bne: opcode = kMips64Beq; break;
+ case kMips64Beqz: opcode = kMips64Bnez; break;
+ case kMips64Bgez: opcode = kMips64Bltz; break;
+ case kMips64Bgtz: opcode = kMips64Blez; break;
+ case kMips64Blez: opcode = kMips64Bgtz; break;
+ case kMips64Bltz: opcode = kMips64Bgez; break;
+ case kMips64Bnez: opcode = kMips64Beqz; break;
+ default:
+ LOG(FATAL) << "Unexpected branch kind " << opcode;
+ UNREACHABLE();
+ }
+ LIR* hop_target = NULL;
+ if (!unconditional) {
+ hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
+ LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
+ lir->operands[1], 0, 0, 0, hop_target);
+ InsertLIRBefore(lir, hop_branch);
+ }
+ LIR* curr_pc = RawLIR(dalvik_offset, kMips64CurrPC);
+ InsertLIRBefore(lir, curr_pc);
+ LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
+ LIR* delta_hi = RawLIR(dalvik_offset, kMips64DeltaHi, rAT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
+ InsertLIRBefore(lir, delta_hi);
+ InsertLIRBefore(lir, anchor);
+ LIR* delta_lo = RawLIR(dalvik_offset, kMips64DeltaLo, rAT, 0, WrapPointer(anchor), 0, 0,
+ lir->target);
+ InsertLIRBefore(lir, delta_lo);
+ LIR* addu = RawLIR(dalvik_offset, kMips64Addu, rAT, rAT, rRA);
+ InsertLIRBefore(lir, addu);
+ LIR* jalr = RawLIR(dalvik_offset, kMips64Jalr, rZERO, rAT);
+ InsertLIRBefore(lir, jalr);
+ if (!unconditional) {
+ InsertLIRBefore(lir, hop_target);
+ }
+ NopLIR(lir);
+}
+
+/*
+ * Assemble the LIR into binary instruction format. Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction. In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
+AssemblerStatus Mips64Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+ LIR *lir;
+ AssemblerStatus res = kSuccess; // Assume success.
+
+ for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ if (lir->opcode < 0) {
+ continue;
+ }
+
+ if (lir->flags.is_nop) {
+ continue;
+ }
+
+ if (lir->flags.fixup != kFixupNone) {
+ if (lir->opcode == kMips64Delta) {
+ /*
+ * The "Delta" pseudo-ops load the difference between
+ * two pc-relative locations into the target register
+ * found in operands[0]. The delta is determined by
+ * (label2 - label1), where label1 is a standard
+ * kPseudoTargetLabel and is stored in operands[2].
+ * If operands[3] is null, then label2 is a kPseudoTargetLabel
+ * and is found in lir->target. If operands[3] is non-NULL,
+ * then it is a Switch/Data table.
+ */
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
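+ // A delta that fits in the positive half of daddiu's signed 16-bit
+ // immediate can be patched into the kMips64Delta instruction in place.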
+ if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
+ // Fits.
+ lir->operands[1] = delta;
+ } else {
+ // Doesn't fit - must expand to kMips64Delta[Hi|Lo] pair.
+ LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMips64DeltaHi, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
+ InsertLIRBefore(lir, new_delta_hi);
+ LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMips64DeltaLo, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
+ InsertLIRBefore(lir, new_delta_lo);
+ LIR *new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0],
+ lir->operands[0], rRAd);
+ InsertLIRBefore(lir, new_addu);
+ NopLIR(lir);
+ res = kRetryAll;
+ }
+ } else if (lir->opcode == kMips64DeltaLo) {
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = delta & 0xffff;
+ } else if (lir->opcode == kMips64DeltaHi) {
+ int offset1 = (reinterpret_cast<LIR*>(UnwrapPointer(lir->operands[2])))->offset;
+ EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(lir->operands[3]));
+ int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
+ int delta = offset2 - offset1;
+ lir->operands[1] = (delta >> 16) & 0xffff;
+ } else if (lir->opcode == kMips64B || lir->opcode == kMips64Bal) {
+ LIR *target_lir = lir->target;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
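+ // The branch immediate is a signed 16-bit word offset, so only targets
+ // within roughly +/-128 KiB are reachable directly; anything further is
+ // rewritten as a long branch.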
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(lir);
+ } else {
+ lir->operands[0] = delta >> 2;
+ }
+ } else if (lir->opcode >= kMips64Beqz && lir->opcode <= kMips64Bnez) {
+ LIR *target_lir = lir->target;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(lir);
+ } else {
+ lir->operands[1] = delta >> 2;
+ }
+ } else if (lir->opcode == kMips64Beq || lir->opcode == kMips64Bne) {
+ LIR *target_lir = lir->target;
+ CodeOffset pc = lir->offset + 4;
+ CodeOffset target = target_lir->offset;
+ int delta = target - pc;
+ if (delta & 0x3) {
+ LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
+ }
+ if (delta > 131068 || delta < -131069) {
+ res = kRetryAll;
+ ConvertShortToLongBranch(lir);
+ } else {
+ lir->operands[2] = delta >> 2;
+ }
+ } else if (lir->opcode == kMips64Jal) {
+ CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
+ CodeOffset target = lir->operands[0];
+ /* ensure PC-region branch can be used */
+ DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
+ if (target & 0x3) {
+ LOG(FATAL) << "Jump target not multiple of 4: " << target;
+ }
+ lir->operands[0] = target >> 2;
+ } else if (lir->opcode == kMips64Lahi) { /* ld address hi (via lui) */
+ LIR *target_lir = lir->target;
+ CodeOffset target = start_addr + target_lir->offset;
+ lir->operands[1] = target >> 16;
+ } else if (lir->opcode == kMips64Lalo) { /* ld address lo (via ori) */
+ LIR *target_lir = lir->target;
+ CodeOffset target = start_addr + target_lir->offset;
+ lir->operands[2] = lir->operands[2] + target;
+ }
+ }
+
+ /*
+ * If one of the pc-relative instructions expanded we'll have
+ * to make another pass. Don't bother to fully assemble the
+ * instruction.
+ */
+ if (res != kSuccess) {
+ continue;
+ }
+ DCHECK(!IsPseudoLirOp(lir->opcode));
+ const Mips64EncodingMap *encoder = &EncodingMap[lir->opcode];
+ uint32_t bits = encoder->skeleton;
+ int i;
+ for (i = 0; i < 4; i++) {
+ uint32_t operand;
+ uint32_t value;
+ operand = lir->operands[i];
+ switch (encoder->field_loc[i].kind) {
+ case kFmtUnused:
+ break;
+ case kFmtBitBlt:
+ if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
+ value = operand;
+ } else {
+ value = (operand << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ }
+ bits |= value;
+ break;
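+ // kFmtBlt5_2 writes the same 5-bit value into two field positions
+ // (field_loc start and end), e.g. both register fields of kMips64DeltaLo's ori.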
+ case kFmtBlt5_2:
+ value = (operand & 0x1f);
+ bits |= (value << encoder->field_loc[i].start);
+ bits |= (value << encoder->field_loc[i].end);
+ break;
+ case kFmtDfp: {
+ // TODO: do we need to adjust now that we're using 64BitSolo?
+ DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
+ value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ }
+ case kFmtSfp:
+ DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
+ value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
+ bits |= value;
+ break;
+ default:
+ LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
+ }
+ }
+ // We only support little-endian MIPS64.
+ code_buffer_.push_back(bits & 0xff);
+ code_buffer_.push_back((bits >> 8) & 0xff);
+ code_buffer_.push_back((bits >> 16) & 0xff);
+ code_buffer_.push_back((bits >> 24) & 0xff);
+ // TUNING: replace with proper delay slot handling.
+ if (encoder->size == 8) {
+ DCHECK(!IsPseudoLirOp(lir->opcode));
+ const Mips64EncodingMap *encoder2 = &EncodingMap[kMips64Nop];
+ uint32_t bits2 = encoder2->skeleton;
+ code_buffer_.push_back(bits2 & 0xff);
+ code_buffer_.push_back((bits2 >> 8) & 0xff);
+ code_buffer_.push_back((bits2 >> 16) & 0xff);
+ code_buffer_.push_back((bits2 >> 24) & 0xff);
+ }
+ }
+ return res;
+}
+
+size_t Mips64Mir2Lir::GetInsnSize(LIR* lir) {
+ DCHECK(!IsPseudoLirOp(lir->opcode));
+ return EncodingMap[lir->opcode].size;
+}
+
+// LIR offset assignment.
+// TODO: consolidate w/ Arm assembly mechanism.
+int Mips64Mir2Lir::AssignInsnOffsets() {
+ LIR* lir;
+ int offset = 0;
+
+ for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
+ lir->offset = offset;
+ if (LIKELY(lir->opcode >= 0)) {
+ if (!lir->flags.is_nop) {
+ offset += lir->flags.size;
+ }
+ } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
+ if (offset & 0x2) {
+ offset += 2;
+ lir->operands[0] = 1;
+ } else {
+ lir->operands[0] = 0;
+ }
+ }
+ // Pseudo opcodes don't consume space.
+ }
+ return offset;
+}
+
+/*
+ * Walk the compilation unit and assign offsets to instructions
+ * and literals and compute the total size of the compiled unit.
+ * TODO: consolidate w/ Arm assembly mechanism.
+ */
+void Mips64Mir2Lir::AssignOffsets() {
+ int offset = AssignInsnOffsets();
+
+ // Const values have to be word aligned.
+ offset = RoundUp(offset, 4);
+
+ // Set up offsets for literals.
+ data_offset_ = offset;
+
+ offset = AssignLiteralOffset(offset);
+
+ offset = AssignSwitchTablesOffset(offset);
+
+ offset = AssignFillArrayDataOffset(offset);
+
+ total_size_ = offset;
+}
+
+/*
+ * Go over each instruction in the list and calculate the offset from the top
+ * before sending them off to the assembler. If an out-of-range branch distance is
+ * seen, rearrange the instructions a bit to correct it.
+ * TODO: consolidate w/ Arm assembly mechanism.
+ */
+void Mips64Mir2Lir::AssembleLIR() {
+ cu_->NewTimingSplit("Assemble");
+ AssignOffsets();
+ int assembler_retries = 0;
+ /*
+ * Assemble here. Note that we generate code with optimistic assumptions
+ * and if found not to work, we'll have to redo the sequence and retry.
+ */
+
+ while (true) {
+ AssemblerStatus res = AssembleInstructions(0);
+ if (res == kSuccess) {
+ break;
+ } else {
+ assembler_retries++;
+ if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
+ CodegenDump();
+ LOG(FATAL) << "Assembler error - too many retries";
+ }
+ // Redo offsets and try again.
+ AssignOffsets();
+ code_buffer_.clear();
+ }
+ }
+
+ // Install literals.
+ InstallLiteralPools();
+
+ // Install switch tables.
+ InstallSwitchTables();
+
+ // Install fill array data.
+ InstallFillArrayData();
+
+ // Create the mapping table and native offset to reference map.
+ cu_->NewTimingSplit("PcMappingTable");
+ CreateMappingTables();
+
+ cu_->NewTimingSplit("GcMap");
+ CreateNativeGcMap();
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/backend_mips64.h b/compiler/dex/quick/mips64/backend_mips64.h
new file mode 100644
index 0000000000..cc30ae06d8
--- /dev/null
+++ b/compiler/dex/quick/mips64/backend_mips64.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
+
+namespace art {
+
+struct CompilationUnit;
+class Mir2Lir;
+class MIRGraph;
+class ArenaAllocator;
+
+Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena);
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/call_mips64.cc b/compiler/dex/quick/mips64/call_mips64.cc
new file mode 100644
index 0000000000..63cef7e348
--- /dev/null
+++ b/compiler/dex/quick/mips64/call_mips64.cc
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips64 ISA */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
+#include "mips64_lir.h"
+#include "mirror/art_method.h"
+#include "mirror/object_array-inl.h"
+
+namespace art {
+
+bool Mips64Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
+ // TODO
+ UNUSED(bb, mir, special);
+ return false;
+}
+
+/*
+ * The lack of pc-relative loads on Mips64 presents somewhat of a challenge
+ * for our PIC switch table strategy. To materialize the current location
+ * we'll do a dummy JAL and reference our tables using rRA as the
+ * base register. Note that rRA will be used both as the base to
+ * locate the switch table data and as the reference base for the switch
+ * target offsets stored in the table. We'll use a special pseudo-instruction
+ * to represent the jal and trigger the construction of the
+ * switch table offsets (which will happen after final assembly and all
+ * labels are fixed).
+ *
+ * The test loop will look something like:
+ *
+ * ori r_end, rZERO, #table_size ; size in bytes
+ * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
+ * nop ; opportunistically fill
+ * BaseLabel:
+ * addiu r_base, rRA, <table> - <BaseLabel> ; table relative to BaseLabel
+ * addu r_end, r_end, r_base ; end of table
+ * lw r_val, [rSP, v_reg_off] ; Test Value
+ * loop:
+ * beq r_base, r_end, done
+ * lw r_key, 0(r_base)
+ * addu r_base, 8
+ * bne r_val, r_key, loop
+ * lw r_disp, -4(r_base)
+ * addu rRA, r_disp
+ * jalr rZERO, rRA
+ * done:
+ *
+ */
+void Mips64Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
+ // Add the table to the list - we'll process it later.
+ SwitchTable* tab_rec = static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable),
+ kArenaAllocData));
+ tab_rec->switch_mir = mir;
+ tab_rec->table = table;
+ tab_rec->vaddr = current_dalvik_offset_;
+ int elements = table[1];
+ switch_tables_.push_back(tab_rec);
+
+ // The table is composed of 8-byte key/disp pairs.
+ int byte_size = elements * 8;
+
+ int size_hi = byte_size >> 16;
+ int size_lo = byte_size & 0xffff;
+
+ RegStorage r_end = AllocTempWide();
+ if (size_hi) {
+ NewLIR2(kMips64Lui, r_end.GetReg(), size_hi);
+ }
+ // Must prevent code motion for the curr pc pair.
+ GenBarrier(); // Scheduling barrier.
+ NewLIR0(kMips64CurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot.
+ if (size_hi) {
+ NewLIR3(kMips64Ori, r_end.GetReg(), r_end.GetReg(), size_lo);
+ } else {
+ NewLIR3(kMips64Ori, r_end.GetReg(), rZERO, size_lo);
+ }
+ GenBarrier(); // Scheduling barrier.
+
+ // Construct BaseLabel and set up table base register.
+ LIR* base_label = NewLIR0(kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later.
+ tab_rec->anchor = base_label;
+ RegStorage r_base = AllocTempWide();
+ NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
+ OpRegRegReg(kOpAdd, r_end, r_end, r_base);
+
+ // Grab switch test value.
+ rl_src = LoadValue(rl_src, kCoreReg);
+
+ // Test loop.
+ RegStorage r_key = AllocTemp();
+ LIR* loop_label = NewLIR0(kPseudoTargetLabel);
+ LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
+ Load32Disp(r_base, 0, r_key);
+ OpRegImm(kOpAdd, r_base, 8);
+ OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
+ RegStorage r_disp = AllocTemp();
+ Load32Disp(r_base, -4, r_disp);
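+ // r_base already points past the matching key/disp pair, so the
+ // displacement word of that pair sits at offset -4.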
+ OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
+ OpReg(kOpBx, TargetReg(kLr, kWide));
+
+ // Loop exit.
+ LIR* exit_label = NewLIR0(kPseudoTargetLabel);
+ exit_branch->target = exit_label;
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ * lw r_val
+ * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
+ * nop ; opportunistically fill
+ * [subiu r_val, bias] ; Remove bias if low_val != 0
+ * bound check -> done
+ * lw r_disp, [rRA, r_val]
+ * addu rRA, r_disp
+ * jalr rZERO, rRA
+ * done:
+ */
+void Mips64Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+ const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
+ // Add the table to the list - we'll process it later.
+ SwitchTable* tab_rec =
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
+ tab_rec->switch_mir = mir;
+ tab_rec->table = table;
+ tab_rec->vaddr = current_dalvik_offset_;
+ int size = table[1];
+ switch_tables_.push_back(tab_rec);
+
+ // Get the switch value.
+ rl_src = LoadValue(rl_src, kCoreReg);
+
+ // Prepare the bias. If too big, handle 1st stage here.
+ int low_key = s4FromSwitchData(&table[2]);
+ bool large_bias = false;
+ RegStorage r_key;
+ if (low_key == 0) {
+ r_key = rl_src.reg;
+ } else if ((low_key & 0xffff) != low_key) {
+ r_key = AllocTemp();
+ LoadConstant(r_key, low_key);
+ large_bias = true;
+ } else {
+ r_key = AllocTemp();
+ }
+
+ // Must prevent code motion for the curr pc pair.
+ GenBarrier();
+ NewLIR0(kMips64CurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot with bias strip.
+ if (low_key == 0) {
+ NewLIR0(kMips64Nop);
+ } else {
+ if (large_bias) {
+ OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
+ } else {
+ OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
+ }
+ }
+ GenBarrier(); // Scheduling barrier.
+
+ // Construct BaseLabel and set up table base register.
+ LIR* base_label = NewLIR0(kPseudoTargetLabel);
+ // Remember base label so offsets can be computed later.
+ tab_rec->anchor = base_label;
+
+ // Bounds check - if < 0 or >= size, continue with the code following the switch.
+ LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
+
+ // Materialize the table base pointer.
+ RegStorage r_base = AllocTempWide();
+ NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
+
+ // Load the displacement from the switch table.
+ RegStorage r_disp = AllocTemp();
+ LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
+
+ // Add to rRA and go.
+ OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
+ OpReg(kOpBx, TargetReg(kLr, kWide));
+
+ // Branch_over target here.
+ LIR* target = NewLIR0(kPseudoTargetLabel);
+ branch_over->target = target;
+}
+
+void Mips64Mir2Lir::GenMoveException(RegLocation rl_dest) {
+ int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
+ RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
+ RegStorage reset_reg = AllocTempRef();
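+ // Load the pending exception reference from the Thread and clear the slot.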
+ LoadRefDisp(rs_rMIPS64_SELF, ex_offset, rl_result.reg, kNotVolatile);
+ LoadConstant(reset_reg, 0);
+ StoreRefDisp(rs_rMIPS64_SELF, ex_offset, reset_reg, kNotVolatile);
+ FreeTemp(reset_reg);
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
+ RegStorage reg_card_base = AllocTempWide();
+ RegStorage reg_card_no = AllocTempWide();
+ // NOTE: native pointer.
+ LoadWordDisp(rs_rMIPS64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
+ OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+ StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
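+ // Storing the low byte of the card-table base marks the card dirty: the
+ // biased base address is chosen so its least significant byte equals the
+ // dirty-card value (same assumption as the other backends' MarkGCCard).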
+ FreeTemp(reg_card_base);
+ FreeTemp(reg_card_no);
+}
+
+void Mips64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
+ int spill_count = num_core_spills_ + num_fp_spills_;
+ /*
+ * On entry, rMIPS64_ARG0, rMIPS64_ARG1, rMIPS64_ARG2, rMIPS64_ARG3,
+ * rMIPS64_ARG4, rMIPS64_ARG5, rMIPS64_ARG6 & rMIPS64_ARG7 are live.
+ * Let the register allocation mechanism know so it doesn't try to
+ * use any of them when expanding the frame or flushing.
+ */
+ LockTemp(rs_rMIPS64_ARG0);
+ LockTemp(rs_rMIPS64_ARG1);
+ LockTemp(rs_rMIPS64_ARG2);
+ LockTemp(rs_rMIPS64_ARG3);
+ LockTemp(rs_rMIPS64_ARG4);
+ LockTemp(rs_rMIPS64_ARG5);
+ LockTemp(rs_rMIPS64_ARG6);
+ LockTemp(rs_rMIPS64_ARG7);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_,
+ kMips64);
+ NewLIR0(kPseudoMethodEntry);
+ RegStorage check_reg = AllocTempWide();
+ RegStorage new_sp = AllocTempWide();
+ if (!skip_overflow_check) {
+ // Load stack limit.
+ LoadWordDisp(rs_rMIPS64_SELF, Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ }
+ // Spill core callee saves.
+ SpillCoreRegs();
+ // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
+ DCHECK_EQ(num_fp_spills_, 0);
+ const int frame_sub = frame_size_ - spill_count * 8;
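+ // Remaining frame to reserve; the callee-save spill slots (8 bytes each)
+ // have already been pushed by SpillCoreRegs().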
+ if (!skip_overflow_check) {
+ class StackOverflowSlowPath : public LIRSlowPath {
+ public:
+ StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
+ }
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel(kPseudoThrowTarget);
+ // Load RA from the top of the frame.
+ m2l_->LoadWordDisp(rs_rMIPS64_SP, sp_displace_ - 8, rs_rRAd);
+ m2l_->OpRegImm(kOpAdd, rs_rMIPS64_SP, sp_displace_);
+ m2l_->ClobberCallerSave();
+ RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow); // Doesn't clobber LR.
+ m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
+ false /* UseLink */);
+ }
+
+ private:
+ const size_t sp_displace_;
+ };
+ OpRegRegImm(kOpSub, new_sp, rs_rMIPS64_SP, frame_sub);
+ LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 8));
+ // TODO: avoid copy for small frame sizes.
+ OpRegCopy(rs_rMIPS64_SP, new_sp); // Establish stack.
+ } else {
+ OpRegImm(kOpSub, rs_rMIPS64_SP, frame_sub);
+ }
+
+ FlushIns(ArgLocs, rl_method);
+
+ FreeTemp(rs_rMIPS64_ARG0);
+ FreeTemp(rs_rMIPS64_ARG1);
+ FreeTemp(rs_rMIPS64_ARG2);
+ FreeTemp(rs_rMIPS64_ARG3);
+ FreeTemp(rs_rMIPS64_ARG4);
+ FreeTemp(rs_rMIPS64_ARG5);
+ FreeTemp(rs_rMIPS64_ARG6);
+ FreeTemp(rs_rMIPS64_ARG7);
+}
+
+void Mips64Mir2Lir::GenExitSequence() {
+ /*
+ * In the exit path, rMIPS64_RET0/rMIPS64_RET1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ LockTemp(rs_rMIPS64_RET0);
+ LockTemp(rs_rMIPS64_RET1);
+
+ NewLIR0(kPseudoMethodExit);
+ UnSpillCoreRegs();
+ OpReg(kOpBx, rs_rRAd);
+}
+
+void Mips64Mir2Lir::GenSpecialExitSequence() {
+ OpReg(kOpBx, rs_rRAd);
+}
+
+void Mips64Mir2Lir::GenSpecialEntryForSuspend() {
+ // Keep 16-byte stack alignment - push A0, i.e. ArtMethod* and RA.
+ core_spill_mask_ = (1u << rs_rRAd.GetRegNum());
+ num_core_spills_ = 1u;
+ fp_spill_mask_ = 0u;
+ num_fp_spills_ = 0u;
+ frame_size_ = 16u;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ OpRegImm(kOpSub, rs_rMIPS64_SP, frame_size_);
+ StoreWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
+ StoreWordDisp(rs_rMIPS64_SP, 0, rs_rA0d);
+}
+
+void Mips64Mir2Lir::GenSpecialExitForSuspend() {
+ // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
+ LoadWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
+ OpRegImm(kOpAdd, rs_rMIPS64_SP, frame_size_);
+}
+
+/*
+ * Bit of a hack here - in the absence of a real scheduling pass,
+ * emit the next instruction in static & direct invoke sequences.
+ */
+static int Mips64NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+ const MethodReference& target_method, uint32_t,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type) {
+ Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
+ if (direct_code != 0 && direct_method != 0) {
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ if (direct_code != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+ }
+ if (direct_method != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ } else {
+ cg->LoadMethodAddress(target_method, type, kArg0);
+ }
+ break;
+ default:
+ return -1;
+ }
+ } else {
+ RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
+ switch (state) {
+ case 0: // Get the current Method* [sets kArg0]
+ // TUNING: we can save a reg copy if Method* has been promoted.
+ cg->LoadCurrMethodDirect(arg0_ref);
+ break;
+ case 1: // Get method->dex_cache_resolved_methods_
+ cg->LoadRefDisp(arg0_ref, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
+ arg0_ref, kNotVolatile);
+ // Set up direct code if known.
+ if (direct_code != 0) {
+ if (direct_code != static_cast<uintptr_t>(-1)) {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
+ cg->LoadCodeAddress(target_method, type, kInvokeTgt);
+ }
+ }
+ break;
+ case 2: // Grab target method*
+ CHECK_EQ(cu->dex_file, target_method.dex_file);
+ cg->LoadRefDisp(arg0_ref, mirror::ObjectArray<mirror::Object>::
+ OffsetOfElement(target_method.dex_method_index).Int32Value(), arg0_ref,
+ kNotVolatile);
+ break;
+ case 3: // Grab the code from the method*
+ if (direct_code == 0) {
+ int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+ InstructionSetPointerSize(cu->instruction_set)).Int32Value();
+ // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
+ cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
+ }
+ break;
+ default:
+ return -1;
+ }
+ }
+ return state + 1;
+}
+
+NextCallInsn Mips64Mir2Lir::GetNextSDCallInsn() {
+ return Mips64NextSDCallInsn;
+}
+
+LIR* Mips64Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
+ return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/codegen_mips64.h b/compiler/dex/quick/mips64/codegen_mips64.h
new file mode 100644
index 0000000000..57c30d8b4b
--- /dev/null
+++ b/compiler/dex/quick/mips64/codegen_mips64.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
+
+#include "dex/quick/mir_to_lir.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+struct CompilationUnit;
+
+class Mips64Mir2Lir FINAL : public Mir2Lir {
+ protected:
+ class InToRegStorageMips64Mapper : public InToRegStorageMapper {
+ public:
+ explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
+ virtual RegStorage GetNextReg(ShortyArg arg);
+ virtual void Reset() OVERRIDE {
+ cur_arg_reg_ = 0;
+ }
+ protected:
+ Mir2Lir* m2l_;
+ private:
+ size_t cur_arg_reg_;
+ };
+
+ InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
+ InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
+ in_to_reg_storage_mips64_mapper_.Reset();
+ return &in_to_reg_storage_mips64_mapper_;
+ }
+
+ public:
+ Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+ // Required for target - codegen utilities.
+ bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
+ OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
+ OVERRIDE;
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size) OVERRIDE;
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size) OVERRIDE;
+ LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+ LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
+
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
+
+ // Required for target - register utilities.
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
+ if (wide_kind == kWide || wide_kind == kRef) {
+ return As64BitReg(TargetReg(reg));
+ } else {
+ return Check32BitReg(TargetReg(reg));
+ }
+ }
+ RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
+ return As64BitReg(TargetReg(reg));
+ }
+ RegLocation GetReturnAlt();
+ RegLocation GetReturnWideAlt();
+ RegLocation LocCReturn();
+ RegLocation LocCReturnRef();
+ RegLocation LocCReturnDouble();
+ RegLocation LocCReturnFloat();
+ RegLocation LocCReturnWide();
+ ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+ void AdjustSpillMask();
+ void ClobberCallerSave();
+ void FreeCallTemps();
+ void LockCallTemps();
+ void CompilerInitializeRegAlloc();
+
+ // Required for target - miscellaneous.
+ void AssembleLIR();
+ int AssignInsnOffsets();
+ void AssignOffsets();
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+ void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+ void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) OVERRIDE;
+ const char* GetTargetInstFmt(int opcode);
+ const char* GetTargetInstName(int opcode);
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+ uint64_t GetTargetInstFlags(int opcode);
+ size_t GetInsnSize(LIR* lir) OVERRIDE;
+ bool IsUnconditionalBranch(LIR* lir);
+
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+
+ // Required for target - Dalvik-level generators.
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation lr_shift);
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags);
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale);
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale, bool card_mark);
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift, int flags);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+ bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
+ bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
+ bool GenInlinedSqrt(CallInfo* info);
+ bool GenInlinedPeek(CallInfo* info, OpSize size);
+ bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivZeroCheckWide(RegStorage reg);
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+ void GenExitSequence();
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+ void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ RegisterClass dest_reg_class) OVERRIDE;
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
+ void GenMoveException(RegLocation rl_dest);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit);
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+ void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
+
+ // Required for target - single operation generators.
+ LIR* OpUnconditionalBranch(LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
+ LIR* OpCondBranch(ConditionCode cc, LIR* target);
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpIT(ConditionCode cond, const char* guide);
+ void OpEndIT(LIR* it);
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+ LIR* OpPcRelLoad(RegStorage reg, LIR* target);
+ LIR* OpReg(OpKind op, RegStorage r_dest_src);
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
+ LIR* OpTestSuspend(LIR* target);
+ LIR* OpVldm(RegStorage r_base, int count);
+ LIR* OpVstm(RegStorage r_base, int count);
+ void OpRegCopyWide(RegStorage dest, RegStorage src);
+
+ // TODO: collapse r_dest.
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+ // TODO: collapse r_src.
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ void SpillCoreRegs();
+ void UnSpillCoreRegs();
+ static const Mips64EncodingMap EncodingMap[kMips64Last];
+ bool InexpensiveConstantInt(int32_t value);
+ bool InexpensiveConstantFloat(int32_t value);
+ bool InexpensiveConstantLong(int64_t value);
+ bool InexpensiveConstantDouble(int64_t value);
+
+ bool WideGPRsAreAliases() const OVERRIDE {
+ return true; // 64b architecture.
+ }
+ bool WideFPRsAreAliases() const OVERRIDE {
+ return true; // 64b architecture.
+ }
+
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
+ OVERRIDE;
+ NextCallInsn GetNextSDCallInsn() OVERRIDE;
+ LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+ // Unimplemented intrinsics.
+ bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
+ OVERRIDE {
+ return false;
+ }
+
+ private:
+ void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div, int flags);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+ RegisterClass reg_class);
+
+ void ConvertShortToLongBranch(LIR* lir);
+
+ /**
+ * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
+ * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
+ * @see As64BitReg
+ */
+ RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register";
+ } else {
+ LOG(WARNING) << "Expected 64b register";
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ RegStorage Check32BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 32b register";
+ } else {
+ LOG(WARNING) << "Checked for 32b register";
+ return As32BitReg(reg);
+ }
+ }
+ return reg;
+ }
+
+ /**
+ * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
+ * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
+ */
+ RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register";
+ } else {
+ LOG(WARNING) << "Expected 32b register";
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ RegStorage Check64BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 64b register";
+ } else {
+ LOG(WARNING) << "Checked for 64b register";
+ return As64BitReg(reg);
+ }
+ }
+ return reg;
+ }
+
+ void GenBreakpoint(int code);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/fp_mips64.cc b/compiler/dex/quick/mips64/fp_mips64.cc
new file mode 100644
index 0000000000..5c8ee9ccb8
--- /dev/null
+++ b/compiler/dex/quick/mips64/fp_mips64.cc
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+void Mips64Mir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
+ int op = kMips64Nop;
+ RegLocation rl_result;
+
+ /*
+ * Don't attempt to optimize register usage since these opcodes call out to
+ * the handlers.
+ */
+ switch (opcode) {
+ case Instruction::ADD_FLOAT_2ADDR:
+ case Instruction::ADD_FLOAT:
+ op = kMips64Fadds;
+ break;
+ case Instruction::SUB_FLOAT_2ADDR:
+ case Instruction::SUB_FLOAT:
+ op = kMips64Fsubs;
+ break;
+ case Instruction::DIV_FLOAT_2ADDR:
+ case Instruction::DIV_FLOAT:
+ op = kMips64Fdivs;
+ break;
+ case Instruction::MUL_FLOAT_2ADDR:
+ case Instruction::MUL_FLOAT:
+ op = kMips64Fmuls;
+ break;
+ case Instruction::REM_FLOAT_2ADDR:
+ case Instruction::REM_FLOAT:
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
+ rl_result = GetReturn(kFPReg);
+ StoreValue(rl_dest, rl_result);
+ return;
+ case Instruction::NEG_FLOAT:
+ GenNegFloat(rl_dest, rl_src1);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValue(rl_src1, kFPReg);
+ rl_src2 = LoadValue(rl_src2, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
+ int op = kMips64Nop;
+ RegLocation rl_result;
+
+ switch (opcode) {
+ case Instruction::ADD_DOUBLE_2ADDR:
+ case Instruction::ADD_DOUBLE:
+ op = kMips64Faddd;
+ break;
+ case Instruction::SUB_DOUBLE_2ADDR:
+ case Instruction::SUB_DOUBLE:
+ op = kMips64Fsubd;
+ break;
+ case Instruction::DIV_DOUBLE_2ADDR:
+ case Instruction::DIV_DOUBLE:
+ op = kMips64Fdivd;
+ break;
+ case Instruction::MUL_DOUBLE_2ADDR:
+ case Instruction::MUL_DOUBLE:
+ op = kMips64Fmuld;
+ break;
+ case Instruction::REM_DOUBLE_2ADDR:
+ case Instruction::REM_DOUBLE:
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(kFPReg);
+ StoreValueWide(rl_dest, rl_result);
+ return;
+ case Instruction::NEG_DOUBLE:
+ GenNegDouble(rl_dest, rl_src1);
+ return;
+ default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ rl_src1 = LoadValueWide(rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
+ int32_t constant) {
+ // TODO: need mips64 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips64";
+}
+
+void Mips64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
+ int64_t constant) {
+ // TODO: need mips64 implementation.
+ UNUSED(rl_dest, rl_src1, constant);
+ LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips64";
+}
+
+void Mips64Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src) {
+ int op = kMips64Nop;
+ RegLocation rl_result;
+ switch (opcode) {
+ case Instruction::INT_TO_FLOAT:
+ op = kMips64Fcvtsw;
+ break;
+ case Instruction::DOUBLE_TO_FLOAT:
+ op = kMips64Fcvtsd;
+ break;
+ case Instruction::FLOAT_TO_DOUBLE:
+ op = kMips64Fcvtds;
+ break;
+ case Instruction::INT_TO_DOUBLE:
+ op = kMips64Fcvtdw;
+ break;
+ case Instruction::FLOAT_TO_INT:
+ GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
+ return;
+ case Instruction::DOUBLE_TO_INT:
+ GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
+ return;
+ case Instruction::LONG_TO_DOUBLE:
+ GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
+ return;
+ case Instruction::FLOAT_TO_LONG:
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
+ return;
+ case Instruction::LONG_TO_FLOAT:
+ GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
+ return;
+ case Instruction::DOUBLE_TO_LONG:
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
+ return;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ }
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ } else {
+ rl_src = LoadValue(rl_src, kFPReg);
+ }
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ if (rl_dest.wide) {
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ StoreValue(rl_dest, rl_result);
+ }
+}
+
+void Mips64Mir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ bool wide = true;
+ QuickEntrypointEnum target;
+
+ switch (opcode) {
+ case Instruction::CMPL_FLOAT:
+ target = kQuickCmplFloat;
+ wide = false;
+ break;
+ case Instruction::CMPG_FLOAT:
+ target = kQuickCmpgFloat;
+ wide = false;
+ break;
+ case Instruction::CMPL_DOUBLE:
+ target = kQuickCmplDouble;
+ break;
+ case Instruction::CMPG_DOUBLE:
+ target = kQuickCmpgDouble;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected opcode: " << opcode;
+ target = kQuickCmplFloat;
+ }
+ FlushAllRegs();
+ LockCallTemps();
+ if (wide) {
+ RegStorage r_tmp1(RegStorage::k64BitSolo, rMIPS64_FARG0);
+ RegStorage r_tmp2(RegStorage::k64BitSolo, rMIPS64_FARG1);
+ LoadValueDirectWideFixed(rl_src1, r_tmp1);
+ LoadValueDirectWideFixed(rl_src2, r_tmp2);
+ } else {
+ LoadValueDirectFixed(rl_src1, rs_rMIPS64_FARG0);
+ LoadValueDirectFixed(rl_src2, rs_rMIPS64_FARG1);
+ }
+ RegStorage r_tgt = LoadHelper(target);
+ // NOTE: not a safepoint.
+ OpReg(kOpBlx, r_tgt);
+ RegLocation rl_result = GetReturn(kCoreReg);
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+ UNUSED(bb, mir, gt_bias, is_double);
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
+}
+
+void Mips64Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
+ RegLocation rl_result;
+ rl_src = LoadValue(rl_src, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(kMips64Fnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValue(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
+ RegLocation rl_result;
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ NewLIR2(kMips64Fnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+bool Mips64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
+ // TODO: need Mips64 implementation.
+ UNUSED(info, is_min, is_long);
+ return false;
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/int_mips64.cc b/compiler/dex/quick/mips64/int_mips64.cc
new file mode 100644
index 0000000000..9023970519
--- /dev/null
+++ b/compiler/dex/quick/mips64/int_mips64.cc
@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the Mips64 ISA */
+
+#include "codegen_mips64.h"
+
+#include "base/logging.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "dex/reg_storage_eq.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "mips64_lir.h"
+#include "mirror/array-inl.h"
+
+namespace art {
+
+/*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * slt temp, x, y; # (x < y) ? 1:0
+ * slt res, y, x; # (x > y) ? 1:0
+ * subu res, res, temp; # res = -1:1:0 for [ < > = ]
+ *
+ */
+void Mips64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegStorage temp = AllocTempWide();
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(kMips64Slt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(kMips64Slt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR3(kMips64Subu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
+ FreeTemp(temp);
+ StoreValue(rl_dest, rl_result);
+}
+
+LIR* Mips64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
+ LIR* branch;
+ Mips64OpCode slt_op;
+ Mips64OpCode br_op;
+ bool cmp_zero = false;
+ bool swapped = false;
+ switch (cond) {
+ case kCondEq:
+ br_op = kMips64Beq;
+ cmp_zero = true;
+ break;
+ case kCondNe:
+ br_op = kMips64Bne;
+ cmp_zero = true;
+ break;
+ case kCondUlt:
+ slt_op = kMips64Sltu;
+ br_op = kMips64Bnez;
+ break;
+ case kCondUge:
+ slt_op = kMips64Sltu;
+ br_op = kMips64Beqz;
+ break;
+ case kCondGe:
+ slt_op = kMips64Slt;
+ br_op = kMips64Beqz;
+ break;
+ case kCondGt:
+ slt_op = kMips64Slt;
+ br_op = kMips64Bnez;
+ swapped = true;
+ break;
+ case kCondLe:
+ slt_op = kMips64Slt;
+ br_op = kMips64Beqz;
+ swapped = true;
+ break;
+ case kCondLt:
+ slt_op = kMips64Slt;
+ br_op = kMips64Bnez;
+ break;
+ case kCondHi: // Gtu
+ slt_op = kMips64Sltu;
+ br_op = kMips64Bnez;
+ swapped = true;
+ break;
+ default:
+ LOG(FATAL) << "No support for ConditionCode: " << cond;
+ return NULL;
+ }
+ if (cmp_zero) {
+ branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
+ } else {
+ RegStorage t_reg = AllocTemp();
+ if (swapped) {
+ NewLIR3(slt_op, t_reg.GetReg(), src2.GetReg(), src1.GetReg());
+ } else {
+ NewLIR3(slt_op, t_reg.GetReg(), src1.GetReg(), src2.GetReg());
+ }
+ branch = NewLIR1(br_op, t_reg.GetReg());
+ FreeTemp(t_reg);
+ }
+ branch->target = target;
+ return branch;
+}
+
+LIR* Mips64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
+ int check_value, LIR* target) {
+ LIR* branch;
+ if (check_value != 0) {
+ // TUNING: handle s16 & kCondLt/Mi case using slti.
+ RegStorage t_reg = AllocTemp();
+ LoadConstant(t_reg, check_value);
+ branch = OpCmpBranch(cond, reg, t_reg, target);
+ FreeTemp(t_reg);
+ return branch;
+ }
+ Mips64OpCode opc;
+ switch (cond) {
+ case kCondEq: opc = kMips64Beqz; break;
+ case kCondGe: opc = kMips64Bgez; break;
+ case kCondGt: opc = kMips64Bgtz; break;
+ case kCondLe: opc = kMips64Blez; break;
+    // case kCondMi:
+ case kCondLt: opc = kMips64Bltz; break;
+ case kCondNe: opc = kMips64Bnez; break;
+ default:
+ // Tuning: use slti when applicable.
+ RegStorage t_reg = AllocTemp();
+ LoadConstant(t_reg, check_value);
+ branch = OpCmpBranch(cond, reg, t_reg, target);
+ FreeTemp(t_reg);
+ return branch;
+ }
+ branch = NewLIR1(opc, reg.GetReg());
+ branch->target = target;
+ return branch;
+}
+
+LIR* Mips64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
+ DCHECK(!r_dest.IsPair() && !r_src.IsPair());
+ if (r_dest.IsFloat() || r_src.IsFloat())
+ return OpFpRegCopy(r_dest, r_src);
+ // TODO: Check that r_src and r_dest are both 32 or both 64 bits length.
+ LIR* res;
+ if (r_dest.Is64Bit() || r_src.Is64Bit()) {
+ res = RawLIR(current_dalvik_offset_, kMips64Move, r_dest.GetReg(), r_src.GetReg());
+ } else {
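+    // A 32-bit copy uses "sll rd, rs, 0", which also sign-extends the low 32 bits on MIPS64.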
+ res = RawLIR(current_dalvik_offset_, kMips64Sll, r_dest.GetReg(), r_src.GetReg(), 0);
+ }
+ if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+void Mips64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ if (r_dest != r_src) {
+ LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+ AppendLIR(res);
+ }
+}
+
+void Mips64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+ OpRegCopy(r_dest, r_src);
+}
+
+void Mips64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
+ // Implement as a branch-over.
+ // TODO: Conditional move?
+ LoadConstant(rs_dest, true_val);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LoadConstant(rs_dest, false_val);
+ LIR* target_label = NewLIR0(kPseudoTargetLabel);
+ ne_branchover->target = target_label;
+}
+
+void Mips64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
+ UNIMPLEMENTED(FATAL) << "Need codegen for select";
+}
+
+void Mips64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
+ UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
+}
+
+RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
+ bool is_div) {
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
+ return rl_result;
+}
+
+RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
+ bool is_div) {
+ RegStorage t_reg = AllocTemp();
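+  // Materialize the divisor literal: addiu t_reg, $zero, lit.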
+ NewLIR3(kMips64Addiu, t_reg.GetReg(), rZERO, lit);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), t_reg.GetReg());
+ FreeTemp(t_reg);
+ return rl_result;
+}
+
+RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+ LOG(FATAL) << "Unexpected use of GenDivRem for Mips64";
+ UNREACHABLE();
+}
+
+RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
+ LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips64";
+ UNREACHABLE();
+}
+
+bool Mips64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
+ UNUSED(info, is_long, is_object);
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
+ UNUSED(info);
+ // TODO: add Mips64 implementation.
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
+ UNUSED(info);
+ // TODO: add Mips64 implementation.
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
+ UNUSED(info);
+ return false;
+}
+
+bool Mips64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
+ if (size != kSignedByte) {
+ // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
+ return false;
+ }
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ rl_src_address = NarrowRegLoc(rl_src_address); // Ignore high half in info->args[1].
+ RegLocation rl_dest = InlineTarget(info);
+ RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ DCHECK(size == kSignedByte);
+ LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
+ StoreValue(rl_dest, rl_result);
+ return true;
+}
+
+bool Mips64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
+ if (size != kSignedByte) {
+ // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
+ return false;
+ }
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ rl_src_address = NarrowRegLoc(rl_src_address); // Ignore high half in info->args[1].
+ RegLocation rl_src_value = info->args[2]; // [size] value.
+ RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+ DCHECK(size == kSignedByte);
+ RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
+ StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
+ return true;
+}
+
+LIR* Mips64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ UNUSED(reg, target);
+ LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
+ LOG(FATAL) << "Unexpected use of OpVldm for Mips64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
+ LOG(FATAL) << "Unexpected use of OpVstm for Mips64";
+ UNREACHABLE();
+}
+
+void Mips64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
+ int lit, int first_bit, int second_bit) {
+ UNUSED(lit);
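+  // Computes rl_src * ((1 << first_bit) + (1 << second_bit)) without a multiply:
+  // result = (rl_src + (rl_src << (second_bit - first_bit))) << first_bit.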
+ RegStorage t_reg = AllocTemp();
+ OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
+ FreeTemp(t_reg);
+ if (first_bit != 0) {
+ OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
+ }
+}
+
+void Mips64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+ GenDivZeroCheck(reg);
+}
+
+// Test suspend flag, return target of taken suspend branch.
+LIR* Mips64Mir2Lir::OpTestSuspend(LIR* target) {
+ OpRegImm(kOpSub, rs_rMIPS64_SUSPEND, 1);
+ return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS64_SUSPEND, 0, target);
+}
+
+// Decrement register and branch on condition.
+LIR* Mips64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
+ OpRegImm(kOpSub, reg, 1);
+ return OpCmpImmBranch(c_code, reg, 0, target);
+}
+
+bool Mips64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
+ RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
+  LOG(FATAL) << "Unexpected use of SmallLiteralDivRem in Mips64";
+ UNREACHABLE();
+}
+
+bool Mips64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
+ LOG(FATAL) << "Unexpected use of easyMultiply in Mips64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
+ LOG(FATAL) << "Unexpected use of OpIT in Mips64";
+ UNREACHABLE();
+}
+
+void Mips64Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
+ LOG(FATAL) << "Unexpected use of OpEndIT in Mips64";
+}
+
+void Mips64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ GenNotLong(rl_dest, rl_src2);
+ return;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ GenMulLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
+ return;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
+ return;
+ case Instruction::AND_LONG:
+ case Instruction::AND_LONG_2ADDR:
+ GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
+
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ return;
+ }
+}
+
+void Mips64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, bool is_div,
+ int flags) {
+ UNUSED(opcode);
+ // TODO: Implement easy div/rem?
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ GenDivZeroCheckWide(rl_src2.reg);
+ }
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+ rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+/*
+ * Generate array load
+ */
+void Mips64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_dest, int scale) {
+ RegisterClass reg_class = RegClassBySize(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+ RegLocation rl_result;
+ rl_array = LoadValue(rl_array, kRefReg);
+ rl_index = LoadValue(rl_index, kCoreReg);
+
+ // FIXME: need to add support for rl_index.is_const.
+
+ if (size == k64 || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ // Null object?
+ GenNullCheck(rl_array.reg, opt_flags);
+
+ RegStorage reg_ptr = AllocTempRef();
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ RegStorage reg_len;
+ if (needs_range_check) {
+ reg_len = AllocTemp();
+ // Get len.
+ Load32Disp(rl_array.reg, len_offset, reg_len);
+ }
+ // reg_ptr -> array data.
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
+ FreeTemp(rl_array.reg);
+ if ((size == k64) || (size == kDouble)) {
+ if (scale) {
+ RegStorage r_new_index = AllocTemp();
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
+ OpRegReg(kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(r_new_index);
+ } else {
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
+ }
+ FreeTemp(rl_index.reg);
+ rl_result = EvalLoc(rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+ LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
+
+ FreeTemp(reg_ptr);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(rl_dest, reg_class, true);
+
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+ if (rl_result.ref) {
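+      // Heap references are 32-bit, so load into the 32-bit view of the result register.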
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
+ kReference);
+ } else {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ }
+
+ FreeTemp(reg_ptr);
+ StoreValue(rl_dest, rl_result);
+ }
+}
+
+/*
+ * Generate array store
+ */
+void Mips64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+ RegLocation rl_index, RegLocation rl_src, int scale,
+ bool card_mark) {
+ RegisterClass reg_class = RegClassBySize(size);
+ int len_offset = mirror::Array::LengthOffset().Int32Value();
+ int data_offset;
+
+ if (size == k64 || size == kDouble) {
+ data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+ } else {
+ data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+ }
+
+ rl_array = LoadValue(rl_array, kRefReg);
+ rl_index = LoadValue(rl_index, kCoreReg);
+
+ // FIXME: need to add support for rl_index.is_const.
+
+ RegStorage reg_ptr;
+ bool allocated_reg_ptr_temp = false;
+ if (IsTemp(rl_array.reg) && !card_mark) {
+ Clobber(rl_array.reg);
+ reg_ptr = rl_array.reg;
+ } else {
+ reg_ptr = AllocTemp();
+ OpRegCopy(reg_ptr, rl_array.reg);
+ allocated_reg_ptr_temp = true;
+ }
+
+ // Null object?
+ GenNullCheck(rl_array.reg, opt_flags);
+
+ bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
+ RegStorage reg_len;
+ if (needs_range_check) {
+ reg_len = AllocTemp();
+ // NOTE: max live temps(4) here.
+ // Get len.
+ Load32Disp(rl_array.reg, len_offset, reg_len);
+ }
+ // reg_ptr -> array data.
+ OpRegImm(kOpAdd, reg_ptr, data_offset);
+  // At this point, reg_ptr points to the array data; 2 live temps.
+ if ((size == k64) || (size == kDouble)) {
+ // TUNING: specific wide routine that can handle fp regs.
+ if (scale) {
+ RegStorage r_new_index = AllocTemp();
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
+ OpRegReg(kOpAdd, reg_ptr, r_new_index);
+ FreeTemp(r_new_index);
+ } else {
+ OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
+ }
+ rl_src = LoadValueWide(rl_src, reg_class);
+
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+
+ StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
+ } else {
+ rl_src = LoadValue(rl_src, reg_class);
+ if (needs_range_check) {
+ GenArrayBoundsCheck(rl_index.reg, reg_len);
+ FreeTemp(reg_len);
+ }
+ StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
+ }
+ if (allocated_reg_ptr_temp) {
+ FreeTemp(reg_ptr);
+ }
+ if (card_mark) {
+ MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
+ }
+}
+
+void Mips64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift) {
+ OpKind op = kOpBkpt;
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case: " << opcode;
+ }
+ rl_shift = LoadValue(rl_shift, kCoreReg);
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_shift, int flags) {
+ UNUSED(flags);
+ OpKind op = kOpBkpt;
+ // Per spec, we only care about low 6 bits of shift amount.
+ int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ if (shift_amount == 0) {
+ StoreValueWide(rl_dest, rl_src1);
+ return;
+ }
+
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case";
+ }
+ OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
+ // Default - bail to non-const handler.
+ GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
+}
+
+void Mips64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
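+  // "sll rd, rs, 0" sign-extends the 32-bit source into the 64-bit destination on MIPS64.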
+ NewLIR3(kMips64Sll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void Mips64Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+ RegLocation rl_src, RegisterClass reg_class) {
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocation(trampoline, rl_src, false);
+ if (rl_dest.wide) {
+ RegLocation rl_result;
+ rl_result = GetReturnWide(reg_class);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ RegLocation rl_result;
+ rl_result = GetReturn(reg_class);
+ StoreValue(rl_dest, rl_result);
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/mips64_lir.h b/compiler/dex/quick/mips64/mips64_lir.h
new file mode 100644
index 0000000000..4a5c5ce3c8
--- /dev/null
+++ b/compiler/dex/quick/mips64/mips64_lir.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
+#define ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
+
+#include "dex/reg_location.h"
+#include "dex/reg_storage.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions.
+ *
+ * zero is always the value 0
+ * at is scratch (normally used as temp reg by assembler)
+ * v0, v1 are scratch (normally hold subroutine return values)
+ * a0-a7 are scratch (normally hold subroutine arguments)
+ * t0-t3, t8 are scratch
+ * t9 is scratch (normally used for function calls)
+ * s0 (rMIPS64_SUSPEND) is reserved [holds suspend-check counter]
+ * s1 (rMIPS64_SELF) is reserved [holds current &Thread]
+ * s2-s7 are callee save (promotion target)
+ * k0, k1 are reserved for use by interrupt handlers
+ * gp is reserved for global pointer
+ * sp is reserved
+ * s8 is callee save (promotion target)
+ * ra is scratch (normally holds the return addr)
+ *
+ * Preserved across C calls: s0-s8
+ * Trashed across C calls: at, v0-v1, a0-a7, t0-t3, t8-t9, gp, ra
+ *
+ * Floating point registers
+ * NOTE: there are 32 fp registers.
+ * f0-f31
+ *
+ * f0-f31 trashed across C calls
+ *
+ * For mips64 code use:
+ * a0-a7 to hold operands
+ * v0-v1 to hold results
+ * t0-t3, t8-t9 for temps
+ *
+ * All jump/branch instructions have a delay slot after them.
+ *
+ * Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1]              | {Note: resides in caller's frame}
+ * | .                      |
+ * | IN[0]                  |
+ * | caller's Method*       |
+ * +========================+ {Note: start of callee's frame}
+ * | spill region           | {variable sized - will include ra if non-leaf.}
+ * +------------------------+
+ * | ...filler word...      | {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1]            |
+ * | V[locals-2]            |
+ * | .                      |
+ * | .                      |
+ * | V[1]                   |
+ * | V[0]                   |
+ * +------------------------+
+ * | 0 to 3 words padding   |
+ * +------------------------+
+ * | OUT[outs-1]            |
+ * | OUT[outs-2]            |
+ * | .                      |
+ * | OUT[0]                 |
+ * | cur_method*            | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
+
+
+#define rARG0 rA0d
+#define rs_rARG0 rs_rA0d
+#define rARG1 rA1d
+#define rs_rARG1 rs_rA1d
+#define rARG2 rA2d
+#define rs_rARG2 rs_rA2d
+#define rARG3 rA3d
+#define rs_rARG3 rs_rA3d
+#define rARG4 rA4d
+#define rs_rARG4 rs_rA4d
+#define rARG5 rA5d
+#define rs_rARG5 rs_rA5d
+#define rARG6 rA6d
+#define rs_rARG6 rs_rA6d
+#define rARG7 rA7d
+#define rs_rARG7 rs_rA7d
+#define rRESULT0 rV0d
+#define rs_rRESULT0 rs_rV0d
+#define rRESULT1 rV1d
+#define rs_rRESULT1 rs_rV1d
+
+#define rFARG0 rF12
+#define rs_rFARG0 rs_rF12
+#define rFARG1 rF13
+#define rs_rFARG1 rs_rF13
+#define rFARG2 rF14
+#define rs_rFARG2 rs_rF14
+#define rFARG3 rF15
+#define rs_rFARG3 rs_rF15
+#define rFARG4 rF16
+#define rs_rFARG4 rs_rF16
+#define rFARG5 rF17
+#define rs_rFARG5 rs_rF17
+#define rFARG6 rF18
+#define rs_rFARG6 rs_rF18
+#define rFARG7 rF19
+#define rs_rFARG7 rs_rF19
+#define rFRESULT0 rF0
+#define rs_rFRESULT0 rs_rF0
+#define rFRESULT1 rF1
+#define rs_rFRESULT1 rs_rF1
+
+// Regs not used for Mips64.
+#define rMIPS64_LR RegStorage::kInvalidRegVal
+#define rMIPS64_PC RegStorage::kInvalidRegVal
+
+enum Mips64ResourceEncodingPos {
+ kMips64GPReg0 = 0,
+ kMips64RegSP = 29,
+ kMips64RegLR = 31,
+ kMips64FPReg0 = 32,
+ kMips64FPRegEnd = 64,
+ kMips64RegPC = kMips64FPRegEnd,
+ kMips64RegEnd = 65,
+};
+
+enum Mips64NativeRegisterPool {  // private marker to keep generate-operator-out.py from processing.
+ rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
+ rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
+ rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
+ rATd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
+ rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
+ rV0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
+ rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
+ rV1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
+ rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
+ rA0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
+ rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
+ rA1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
+ rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
+ rA2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
+ rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
+ rA3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
+ rA4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
+ rA4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
+ rA5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
+ rA5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
+ rA6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+ rA6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
+ rA7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+ rA7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
+ rT0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+ rT0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
+ rT1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+ rT1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
+ rT2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+ rT2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
+ rT3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+ rT3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
+ rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
+ rS0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
+ rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
+ rS1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
+ rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
+ rS2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
+ rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
+ rS3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
+ rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
+ rS4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
+ rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
+ rS5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
+ rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
+ rS6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
+ rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
+ rS7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
+ rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
+ rT8d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
+ rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
+ rT9d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
+ rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
+ rK0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
+ rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
+ rK1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
+ rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
+ rGPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
+ rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
+ rSPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
+ rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
+ rFPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
+ rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+ rRAd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
+
+ rF0 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
+ rF1 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
+ rF2 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
+ rF3 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
+ rF4 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
+ rF5 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
+ rF6 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
+ rF7 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
+ rF8 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
+ rF9 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
+ rF10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
+ rF11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
+ rF12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
+ rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
+ rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
+ rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+ rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+ rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+ rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+ rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+ rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+ rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+ rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+ rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+ rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+ rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+ rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+ rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+ rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+ rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+ rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+ rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
+
+ rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
+ rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
+ rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
+ rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+ rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
+ rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+ rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+ rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+ rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+ rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
+ rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+ rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
+ rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+ rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
+ rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+ rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
+ rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+ rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
+ rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+ rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
+ rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+ rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
+ rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+ rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
+};
+
+constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
+constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
+constexpr RegStorage rs_rAT(RegStorage::kValid | rAT);
+constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
+constexpr RegStorage rs_rV0(RegStorage::kValid | rV0);
+constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
+constexpr RegStorage rs_rV1(RegStorage::kValid | rV1);
+constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
+constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
+constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
+constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
+constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
+constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
+constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
+constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
+constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
+constexpr RegStorage rs_rA4(RegStorage::kValid | rA4);
+constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
+constexpr RegStorage rs_rA5(RegStorage::kValid | rA5);
+constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
+constexpr RegStorage rs_rA6(RegStorage::kValid | rA6);
+constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
+constexpr RegStorage rs_rA7(RegStorage::kValid | rA7);
+constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
+constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
+constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
+constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
+constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
+constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
+constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
+constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
+constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
+constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
+constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
+constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
+constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
+constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
+constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
+constexpr RegStorage rs_rS3(RegStorage::kValid | rS3);
+constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
+constexpr RegStorage rs_rS4(RegStorage::kValid | rS4);
+constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
+constexpr RegStorage rs_rS5(RegStorage::kValid | rS5);
+constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
+constexpr RegStorage rs_rS6(RegStorage::kValid | rS6);
+constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
+constexpr RegStorage rs_rS7(RegStorage::kValid | rS7);
+constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
+constexpr RegStorage rs_rT8(RegStorage::kValid | rT8);
+constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
+constexpr RegStorage rs_rT9(RegStorage::kValid | rT9);
+constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
+constexpr RegStorage rs_rK0(RegStorage::kValid | rK0);
+constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
+constexpr RegStorage rs_rK1(RegStorage::kValid | rK1);
+constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
+constexpr RegStorage rs_rGP(RegStorage::kValid | rGP);
+constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
+constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
+constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
+constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
+constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
+constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
+constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
+
+constexpr RegStorage rs_rMIPS64_LR(RegStorage::kInvalid); // Not used for MIPS64.
+constexpr RegStorage rs_rMIPS64_PC(RegStorage::kInvalid); // Not used for MIPS64.
+constexpr RegStorage rs_rMIPS64_COUNT(RegStorage::kInvalid); // Not used for MIPS64.
+
+constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
+constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
+constexpr RegStorage rs_rF2(RegStorage::kValid | rF2);
+constexpr RegStorage rs_rF3(RegStorage::kValid | rF3);
+constexpr RegStorage rs_rF4(RegStorage::kValid | rF4);
+constexpr RegStorage rs_rF5(RegStorage::kValid | rF5);
+constexpr RegStorage rs_rF6(RegStorage::kValid | rF6);
+constexpr RegStorage rs_rF7(RegStorage::kValid | rF7);
+constexpr RegStorage rs_rF8(RegStorage::kValid | rF8);
+constexpr RegStorage rs_rF9(RegStorage::kValid | rF9);
+constexpr RegStorage rs_rF10(RegStorage::kValid | rF10);
+constexpr RegStorage rs_rF11(RegStorage::kValid | rF11);
+constexpr RegStorage rs_rF12(RegStorage::kValid | rF12);
+constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
+constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
+constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
+constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
+constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
+constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
+constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
+constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
+constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
+constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
+constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
+constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
+constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
+constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
+constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
+constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
+constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
+constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
+constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
+
+constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
+constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
+constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
+constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
+constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
+constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
+constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
+constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
+constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
+constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
+constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
+constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
+constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
+constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
+constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
+constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
+constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
+constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
+constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
+constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
+constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
+constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
+constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
+constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
+constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
+constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
+constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
+constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
+constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
+constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
+constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
+
+// TODO: reduce/eliminate use of these.
+#define rMIPS64_SUSPEND rS0d
+#define rs_rMIPS64_SUSPEND rs_rS0d
+#define rMIPS64_SELF rS1d
+#define rs_rMIPS64_SELF rs_rS1d
+#define rMIPS64_SP rSPd
+#define rs_rMIPS64_SP rs_rSPd
+#define rMIPS64_ARG0 rARG0
+#define rs_rMIPS64_ARG0 rs_rARG0
+#define rMIPS64_ARG1 rARG1
+#define rs_rMIPS64_ARG1 rs_rARG1
+#define rMIPS64_ARG2 rARG2
+#define rs_rMIPS64_ARG2 rs_rARG2
+#define rMIPS64_ARG3 rARG3
+#define rs_rMIPS64_ARG3 rs_rARG3
+#define rMIPS64_ARG4 rARG4
+#define rs_rMIPS64_ARG4 rs_rARG4
+#define rMIPS64_ARG5 rARG5
+#define rs_rMIPS64_ARG5 rs_rARG5
+#define rMIPS64_ARG6 rARG6
+#define rs_rMIPS64_ARG6 rs_rARG6
+#define rMIPS64_ARG7 rARG7
+#define rs_rMIPS64_ARG7 rs_rARG7
+#define rMIPS64_FARG0 rFARG0
+#define rs_rMIPS64_FARG0 rs_rFARG0
+#define rMIPS64_FARG1 rFARG1
+#define rs_rMIPS64_FARG1 rs_rFARG1
+#define rMIPS64_FARG2 rFARG2
+#define rs_rMIPS64_FARG2 rs_rFARG2
+#define rMIPS64_FARG3 rFARG3
+#define rs_rMIPS64_FARG3 rs_rFARG3
+#define rMIPS64_FARG4 rFARG4
+#define rs_rMIPS64_FARG4 rs_rFARG4
+#define rMIPS64_FARG5 rFARG5
+#define rs_rMIPS64_FARG5 rs_rFARG5
+#define rMIPS64_FARG6 rFARG6
+#define rs_rMIPS64_FARG6 rs_rFARG6
+#define rMIPS64_FARG7 rFARG7
+#define rs_rMIPS64_FARG7 rs_rFARG7
+#define rMIPS64_RET0 rRESULT0
+#define rs_rMIPS64_RET0 rs_rRESULT0
+#define rMIPS64_RET1 rRESULT1
+#define rs_rMIPS64_RET1 rs_rRESULT1
+#define rMIPS64_INVOKE_TGT rT9d
+#define rs_rMIPS64_INVOKE_TGT rs_rT9d
+#define rMIPS64_COUNT RegStorage::kInvalidRegVal
+
+// RegisterLocation templates for return values (r_V0).
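+// Editorial note (inferred from the initializers below): the positional fields
+// follow RegLocation's declaration order (location, wide, defined, is_const,
+// fp, core, ref, high_word, home), then the RegStorage and the two sreg slots.
+// The wide/double templates set the wide bit, the float/double templates set
+// fp, and the ref template sets ref.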
+const RegLocation mips64_loc_c_return
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
+ RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_ref
+ {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_float
+ {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
+ RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
+
+enum Mips64ShiftEncodings {
+ kMips64Lsl = 0x0,
+ kMips64Lsr = 0x1,
+ kMips64Asr = 0x2,
+ kMips64Ror = 0x3
+};
+
+// MIPS64 sync kinds (Note: support for kinds other than kSYNC0 may not exist).
+#define kSYNC0 0x00
+#define kSYNC_WMB 0x04
+#define kSYNC_MB 0x01
+#define kSYNC_ACQUIRE 0x11
+#define kSYNC_RELEASE 0x12
+#define kSYNC_RMB 0x13
+
+// TODO: Use smaller hammer when appropriate for target CPU.
+#define kST kSYNC0
+#define kSY kSYNC0
+
+/*
+ * The following enum defines the list of Mips64 instructions supported by the
+ * assembler. Their corresponding EncodingMap positions will be defined in
+ * assemble_mips64.cc.
+ */
+enum Mips64OpCode {
+ kMips64First = 0,
+ kMips6432BitData = kMips64First, // data [31..0].
+ kMips64Addiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+ kMips64Addu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+ kMips64And, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+ kMips64Andi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+ kMips64B, // b o [0001000000000000] o[15..0].
+ kMips64Bal, // bal o [0000010000010001] o[15..0].
+ // NOTE: the code tests the range kMips64Beq thru kMips64Bne, so adding an instruction in this
+ // range may require updates.
+ kMips64Beq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+ kMips64Beqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
+ kMips64Bgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
+ kMips64Bgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+ kMips64Blez, // blez s,o [000110] s[25..21] [00000] o[15..0].
+ kMips64Bltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
+ kMips64Bnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
+ kMips64Bne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+ kMips64Break, // break code [000000] code[25..6] [001101].
+ kMips64Daddiu, // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
+ kMips64Daddu, // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
+ kMips64Dahi, // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
+ kMips64Dati, // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
+ kMips64Daui, // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
+ kMips64Ddiv, // ddiv d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
+ kMips64Div, // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
+ kMips64Dmod, // dmod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
+ kMips64Dmul, // dmul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
+ kMips64Dmfc1, // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
+ kMips64Dmtc1, // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
+ kMips64Drotr32, // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsll, // dsll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
+ kMips64Dsll32, // dsll32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
+ kMips64Dsrl, // dsrl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
+ kMips64Dsrl32, // dsrl32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsra, // dsra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
+ kMips64Dsra32, // dsra32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
+ kMips64Dsllv, // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
+ kMips64Dsrlv, // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
+ kMips64Dsrav, // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
+ kMips64Dsubu, // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
+ kMips64Ext, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+ kMips64Faddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+ kMips64Fadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+ kMips64Fdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+ kMips64Fdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+ kMips64Fmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+ kMips64Fmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+ kMips64Fsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+ kMips64Fsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+ kMips64Fcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+ kMips64Fcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+ kMips64Fcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+ kMips64Fcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+ kMips64Fcvtws, // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+ kMips64Fcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+ kMips64Fmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+ kMips64Fmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+ kMips64Fnegd, // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
+ kMips64Fnegs, // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
+ kMips64Fldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+ kMips64Flwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+ kMips64Fsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+ kMips64Fswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+ kMips64Jal, // jal t [000011] t[25..0].
+ kMips64Jalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+ kMips64Lahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+ kMips64Lalo, // ori t,s,imm16 [001101] s[25..21] t[20..16] imm16[15..0] load addr lo.
+ kMips64Lb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+ kMips64Lbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+ kMips64Ld, // ld t,o(b) [110111] b[25..21] t[20..16] o[15..0].
+ kMips64Lh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+ kMips64Lhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+ kMips64Lui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+ kMips64Lw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+ kMips64Lwu, // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
+ kMips64Mfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+ kMips64Mtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+ kMips64Move, // move d,s [000000] s[25..21] [00000] d[15..11] [00000101101].
+ kMips64Mod, // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
+ kMips64Mul, // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
+ kMips64Nop, // nop [00000000000000000000000000000000].
+ kMips64Nor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+ kMips64Or, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+ kMips64Ori, // ori t,s,imm16 [001101] s[25..21] t[20..16] imm16[15..0].
+ kMips64Sb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+ kMips64Sd, // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
+ kMips64Seb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+ kMips64Seh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+ kMips64Sh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+ kMips64Sll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+ kMips64Sllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+ kMips64Slt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+ kMips64Slti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+ kMips64Sltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+ kMips64Sra, // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
+ kMips64Srav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+ kMips64Srl, // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+ kMips64Srlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+ kMips64Subu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+ kMips64Sw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+ kMips64Sync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
+ kMips64Xor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+ kMips64Xori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+ kMips64CurrPC, // jal to .+8 to materialize pc.
+ kMips64Delta, // Pseudo for ori t, s, <label>-<label>.
+ kMips64DeltaHi, // Pseudo for lui t, high16(<label>-<label>).
+ kMips64DeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+ kMips64Undefined, // undefined [011001xxxxxxxxxxxxxxxx].
+ kMips64Last
+};
+std::ostream& operator<<(std::ostream& os, const Mips64OpCode& rhs);
+
+// Instruction assembly field_loc kind.
+enum Mips64EncodingKind {
+ kFmtUnused,
+ kFmtBitBlt, // Bit string using end/start.
+ kFmtDfp, // Double FP reg.
+ kFmtSfp, // Single FP reg.
+ kFmtBlt5_2, // Same 5-bit field to 2 locations.
+};
+std::ostream& operator<<(std::ostream& os, const Mips64EncodingKind& rhs);
+
+// Struct used to define the snippet positions for each MIPS64 opcode.
+struct Mips64EncodingMap {
+ uint32_t skeleton;
+ struct {
+ Mips64EncodingKind kind;
+ int end; // end for kFmtBitBlt, 1-bit slice end for FP regs.
+ int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
+ } field_loc[4];
+ Mips64OpCode opcode;
+ uint64_t flags;
+ const char *name;
+ const char* fmt;
+ int size; // Note: size is in bytes.
+};
+
+extern Mips64EncodingMap EncodingMap[kMips64Last];
+
+#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
+#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
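+// Illustrative use (a sketch mirroring OpRegRegImm in utility_mips64.cc): these
+// range checks choose between the immediate and the register form of an op,
+// e.g.
+//   opcode = IS_SIMM16(value) ? kMips64Daddiu : kMips64Daddu;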
+
+} // namespace art
+
+#endif // ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
diff --git a/compiler/dex/quick/mips64/target_mips64.cc b/compiler/dex/quick/mips64/target_mips64.cc
new file mode 100644
index 0000000000..6ed9617bde
--- /dev/null
+++ b/compiler/dex/quick/mips64/target_mips64.cc
@@ -0,0 +1,653 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include <inttypes.h>
+
+#include <string>
+
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "backend_mips64.h"
+#include "base/logging.h"
+#include "dex/compiler_ir.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "driver/compiler_driver.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+static constexpr RegStorage core_regs_arr32[] =
+ {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
+ rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
+ rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
+static constexpr RegStorage core_regs_arr64[] =
+ {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
+ rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
+ rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
+ rs_rFPd, rs_rRAd};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_regs_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_regs_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_regs_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_regs_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+static constexpr RegStorage reserved_regs_arr32[] =
+ {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
+static constexpr RegStorage reserved_regs_arr64[] =
+ {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
+static constexpr RegStorage core_temps_arr32[] =
+ {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0,
+ rs_rT1, rs_rT2, rs_rT3, rs_rT8};
+static constexpr RegStorage core_temps_arr64[] =
+ {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
+ rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_temps_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_temps_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_temps_arr[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_temps_arr[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+
+static constexpr ArrayRef<const RegStorage> empty_pool;
+static constexpr ArrayRef<const RegStorage> core_regs32(core_regs_arr32);
+static constexpr ArrayRef<const RegStorage> core_regs64(core_regs_arr64);
+static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
+static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
+static constexpr ArrayRef<const RegStorage> reserved_regs32(reserved_regs_arr32);
+static constexpr ArrayRef<const RegStorage> reserved_regs64(reserved_regs_arr64);
+static constexpr ArrayRef<const RegStorage> core_temps32(core_temps_arr32);
+static constexpr ArrayRef<const RegStorage> core_temps64(core_temps_arr64);
+static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
+static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
+
+RegLocation Mips64Mir2Lir::LocCReturn() {
+ return mips64_loc_c_return;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnRef() {
+ return mips64_loc_c_return_ref;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnWide() {
+ return mips64_loc_c_return_wide;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnFloat() {
+ return mips64_loc_c_return_float;
+}
+
+RegLocation Mips64Mir2Lir::LocCReturnDouble() {
+ return mips64_loc_c_return_double;
+}
+
+// Return a target-dependent special register.
+RegStorage Mips64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ RegStorage res_reg;
+ switch (reg) {
+ case kSelf: res_reg = rs_rS1; break;
+ case kSuspend: res_reg = rs_rS0; break;
+ case kLr: res_reg = rs_rRA; break;
+ case kPc: res_reg = RegStorage::InvalidReg(); break;
+ case kSp: res_reg = rs_rSP; break;
+ case kArg0: res_reg = rs_rA0; break;
+ case kArg1: res_reg = rs_rA1; break;
+ case kArg2: res_reg = rs_rA2; break;
+ case kArg3: res_reg = rs_rA3; break;
+ case kArg4: res_reg = rs_rA4; break;
+ case kArg5: res_reg = rs_rA5; break;
+ case kArg6: res_reg = rs_rA6; break;
+ case kArg7: res_reg = rs_rA7; break;
+ case kFArg0: res_reg = rs_rF12; break;
+ case kFArg1: res_reg = rs_rF13; break;
+ case kFArg2: res_reg = rs_rF14; break;
+ case kFArg3: res_reg = rs_rF15; break;
+ case kFArg4: res_reg = rs_rF16; break;
+ case kFArg5: res_reg = rs_rF17; break;
+ case kFArg6: res_reg = rs_rF18; break;
+ case kFArg7: res_reg = rs_rF19; break;
+ case kRet0: res_reg = rs_rV0; break;
+ case kRet1: res_reg = rs_rV1; break;
+ case kInvokeTgt: res_reg = rs_rT9; break;
+ case kHiddenArg: res_reg = rs_rT0; break;
+ case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
+ case kCount: res_reg = RegStorage::InvalidReg(); break;
+ default: res_reg = RegStorage::InvalidReg();
+ }
+ return res_reg;
+}
+
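+// Note on the tables below: they start at kArg1/kFArg1. kArg0 (a0) is not
+// handed out to shorty arguments because, in the quick calling convention, it
+// carries the ArtMethod* on entry (editorial note, stated as an assumption).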
+RegStorage Mips64Mir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
+ const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
+ {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
+ const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+ const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
+ {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
+ const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+
+ RegStorage result = RegStorage::InvalidReg();
+ if (arg.IsFP()) {
+ if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
+ DCHECK(!arg.IsRef());
+ result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsWide() ? kWide : kNotWide);
+ }
+ } else {
+ if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
+ DCHECK(!(arg.IsWide() && arg.IsRef()));
+ result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
+ }
+ }
+ return result;
+}
+
+/*
+ * Decode the register id.
+ */
+ResourceMask Mips64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
+ return ResourceMask::Bit((reg.IsFloat() ? kMips64FPReg0 : 0) + reg.GetRegNum());
+}
+
+ResourceMask Mips64Mir2Lir::GetPCUseDefEncoding() const {
+ return ResourceMask::Bit(kMips64RegPC);
+}
+
+void Mips64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) {
+ DCHECK(!lir->flags.use_def_invalid);
+
+ // Mips64-specific resource map setup here.
+ if (flags & REG_DEF_SP) {
+ def_mask->SetBit(kMips64RegSP);
+ }
+
+ if (flags & REG_USE_SP) {
+ use_mask->SetBit(kMips64RegSP);
+ }
+
+ if (flags & REG_DEF_LR) {
+ def_mask->SetBit(kMips64RegLR);
+ }
+}
+
+/* For dumping instructions */
+#define MIPS64_REG_COUNT 32
+static const char *mips64_reg_name[MIPS64_REG_COUNT] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
+/*
+ * Interpret a format string and build a string no longer than size
+ * See format key in assemble_mips64.cc.
+ */
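+// Illustrative expansion (a hedged sketch; the real format keys are defined in
+// assemble_mips64.cc): for a hypothetical fmt "daddiu !0r,!1r,!2d" with
+// operands {5, 6, 16}, the loop below yields "daddiu a1,a2,16", since '!Nr'
+// prints mips64_reg_name[operand N] and '!Nd' prints operand N in decimal.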
+std::string Mips64Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+ std::string buf;
+ int i;
+ const char *fmt_end = &fmt[strlen(fmt)];
+ char tbuf[256];
+ char nc;
+ while (fmt < fmt_end) {
+ int operand;
+ if (*fmt == '!') {
+ fmt++;
+ DCHECK_LT(fmt, fmt_end);
+ nc = *fmt++;
+ if (nc == '!') {
+ strcpy(tbuf, "!");
+ } else {
+ DCHECK_LT(fmt, fmt_end);
+ DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
+ operand = lir->operands[nc-'0'];
+ switch (*fmt++) {
+ case 'b':
+ strcpy(tbuf, "0000");
+ for (i = 3; i >= 0; i--) {
+ tbuf[i] += operand & 1;
+ operand >>= 1;
+ }
+ break;
+ case 's':
+ snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
+ break;
+ case 'S':
+ DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
+ snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
+ break;
+ case 'h':
+ snprintf(tbuf, arraysize(tbuf), "%04x", operand);
+ break;
+ case 'M':
+ case 'd':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand);
+ break;
+ case 'D':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand+1);
+ break;
+ case 'E':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
+ break;
+ case 'F':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
+ break;
+ case 't':
+ snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
+ reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
+ lir->target);
+ break;
+ case 'T':
+ snprintf(tbuf, arraysize(tbuf), "0x%08x", operand << 2);
+ break;
+ case 'u': {
+ int offset_1 = lir->operands[0];
+ int offset_2 = NEXT_LIR(lir)->operands[0];
+ uintptr_t target =
+ (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
+ (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
+ snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void*>(target));
+ break;
+ }
+
+ /* Nothing to print for BLX_2 */
+ case 'v':
+ strcpy(tbuf, "see above");
+ break;
+ case 'r':
+ DCHECK(operand >= 0 && operand < MIPS64_REG_COUNT);
+ strcpy(tbuf, mips64_reg_name[operand]);
+ break;
+ case 'N':
+ // Placeholder for delay slot handling
+ strcpy(tbuf, "; nop");
+ break;
+ default:
+ strcpy(tbuf, "DecodeError");
+ break;
+ }
+ buf += tbuf;
+ }
+ } else {
+ buf += *fmt++;
+ }
+ }
+ return buf;
+}
+
+// FIXME: need to redo resource maps for MIPS64 - fix this at that time.
+void Mips64Mir2Lir::DumpResourceMask(LIR *mips64_lir, const ResourceMask& mask, const char *prefix) {
+ char buf[256];
+ buf[0] = 0;
+
+ if (mask.Equals(kEncodeAll)) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
+
+ for (i = 0; i < kMips64RegEnd; i++) {
+ if (mask.HasBit(i)) {
+ snprintf(num, arraysize(num), "%d ", i);
+ strcat(buf, num);
+ }
+ }
+
+ if (mask.HasBit(ResourceMask::kCCode)) {
+ strcat(buf, "cc ");
+ }
+ if (mask.HasBit(ResourceMask::kFPStatus)) {
+ strcat(buf, "fpcc ");
+ }
+ // Memory bits.
+ if (mips64_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
+ snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
+ DECODE_ALIAS_INFO_REG(mips64_lir->flags.alias_info),
+ DECODE_ALIAS_INFO_WIDE(mips64_lir->flags.alias_info) ? "(+1)" : "");
+ }
+ if (mask.HasBit(ResourceMask::kLiteral)) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask.HasBit(ResourceMask::kHeapRef)) {
+ strcat(buf, "heap ");
+ }
+ if (mask.HasBit(ResourceMask::kMustNotAlias)) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
+}
+
+/*
+ * TUNING: is this a true leaf? We can't just use METHOD_IS_LEAF to decide, as
+ * some instructions might call out to C/assembly helper functions. Until that
+ * machinery is in place, always spill lr.
+ */
+
+void Mips64Mir2Lir::AdjustSpillMask() {
+ core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
+ num_core_spills_++;
+}
+
+/* Clobber all regs that might be used by an external C call */
+void Mips64Mir2Lir::ClobberCallerSave() {
+ Clobber(rs_rZEROd);
+ Clobber(rs_rATd);
+ Clobber(rs_rV0d);
+ Clobber(rs_rV1d);
+ Clobber(rs_rA0d);
+ Clobber(rs_rA1d);
+ Clobber(rs_rA2d);
+ Clobber(rs_rA3d);
+ Clobber(rs_rA4d);
+ Clobber(rs_rA5d);
+ Clobber(rs_rA6d);
+ Clobber(rs_rA7d);
+ Clobber(rs_rT0d);
+ Clobber(rs_rT1d);
+ Clobber(rs_rT2d);
+ Clobber(rs_rT3d);
+ Clobber(rs_rT8d);
+ Clobber(rs_rT9d);
+ Clobber(rs_rK0d);
+ Clobber(rs_rK1d);
+ Clobber(rs_rGPd);
+ Clobber(rs_rFPd);
+ Clobber(rs_rRAd);
+
+ Clobber(rs_rF0);
+ Clobber(rs_rF1);
+ Clobber(rs_rF2);
+ Clobber(rs_rF3);
+ Clobber(rs_rF4);
+ Clobber(rs_rF5);
+ Clobber(rs_rF6);
+ Clobber(rs_rF7);
+ Clobber(rs_rF8);
+ Clobber(rs_rF9);
+ Clobber(rs_rF10);
+ Clobber(rs_rF11);
+ Clobber(rs_rF12);
+ Clobber(rs_rF13);
+ Clobber(rs_rF14);
+ Clobber(rs_rF15);
+ Clobber(rs_rD0);
+ Clobber(rs_rD1);
+ Clobber(rs_rD2);
+ Clobber(rs_rD3);
+ Clobber(rs_rD4);
+ Clobber(rs_rD5);
+ Clobber(rs_rD6);
+ Clobber(rs_rD7);
+}
+
+RegLocation Mips64Mir2Lir::GetReturnWideAlt() {
+ UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS64";
+ RegLocation res = LocCReturnWide();
+ return res;
+}
+
+RegLocation Mips64Mir2Lir::GetReturnAlt() {
+ UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS64";
+ RegLocation res = LocCReturn();
+ return res;
+}
+
+/* To be used when explicitly managing register use */
+void Mips64Mir2Lir::LockCallTemps() {
+ LockTemp(rs_rMIPS64_ARG0);
+ LockTemp(rs_rMIPS64_ARG1);
+ LockTemp(rs_rMIPS64_ARG2);
+ LockTemp(rs_rMIPS64_ARG3);
+ LockTemp(rs_rMIPS64_ARG4);
+ LockTemp(rs_rMIPS64_ARG5);
+ LockTemp(rs_rMIPS64_ARG6);
+ LockTemp(rs_rMIPS64_ARG7);
+}
+
+/* To be used when explicitly managing register use */
+void Mips64Mir2Lir::FreeCallTemps() {
+ FreeTemp(rs_rMIPS64_ARG0);
+ FreeTemp(rs_rMIPS64_ARG1);
+ FreeTemp(rs_rMIPS64_ARG2);
+ FreeTemp(rs_rMIPS64_ARG3);
+ FreeTemp(rs_rMIPS64_ARG4);
+ FreeTemp(rs_rMIPS64_ARG5);
+ FreeTemp(rs_rMIPS64_ARG6);
+ FreeTemp(rs_rMIPS64_ARG7);
+ FreeTemp(TargetReg(kHiddenArg));
+}
+
+bool Mips64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
+ if (cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
+ NewLIR1(kMips64Sync, 0 /* Only stype currently supported */);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void Mips64Mir2Lir::CompilerInitializeRegAlloc() {
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs32, core_regs64, sp_regs,
+ dp_regs, reserved_regs32, reserved_regs64,
+ core_temps32, core_temps64, sp_temps,
+ dp_temps));
+
+ // Target-specific adjustments.
+
+ // Alias single precision floats to appropriate half of overlapping double.
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
+ int sp_reg_num = info->GetReg().GetRegNum();
+ int dp_reg_num = sp_reg_num;
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+
+ // Alias 32-bit core registers to their corresponding 64-bit registers.
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
+ int d_reg_num = info->GetReg().GetRegNum();
+ RegStorage d_reg = RegStorage::Solo64(d_reg_num);
+ RegisterInfo* d_reg_info = GetRegInfo(d_reg);
+ // 64bit D register's master storage should refer to itself.
+ DCHECK_EQ(d_reg_info, d_reg_info->Master());
+ // Redirect 32bit master storage to 64bit D.
+ info->SetMaster(d_reg_info);
+ // 32bit should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+
+ // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
+ // TODO: adjust when we roll to hard float calling convention.
+ reg_pool_->next_core_reg_ = 2;
+ reg_pool_->next_sp_reg_ = 2;
+ reg_pool_->next_dp_reg_ = 1;
+}
+
+/*
+ * In the Arm code it is typical to use the link register
+ * to hold the target address. However, for Mips64 we must
+ * ensure that all branch instructions can be restarted if
+ * there is a trap in the shadow. Allocate a temp register.
+ */
+RegStorage Mips64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ // NOTE: native pointer.
+ LoadWordDisp(rs_rMIPS64_SELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_rT9d);
+ return rs_rT9d;
+}
+
+LIR* Mips64Mir2Lir::CheckSuspendUsingLoad() {
+ RegStorage tmp = AllocTemp();
+ // NOTE: native pointer.
+ LoadWordDisp(rs_rMIPS64_SELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
+ LIR *inst = LoadWordDisp(tmp, 0, tmp);
+ FreeTemp(tmp);
+ return inst;
+}
+
+LIR* Mips64Mir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
+ DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers.
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
+ RegStorage r_tgt = LoadHelper(kQuickA64Load);
+ LIR *ret = OpReg(kOpBlx, r_tgt);
+ OpRegCopy(r_dest, TargetReg(kRet0));
+ return ret;
+}
+
+LIR* Mips64Mir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
+ DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
+ DCHECK(!r_src.IsPair());
+ ClobberCallerSave();
+ LockCallTemps(); // Using fixed registers.
+ RegStorage temp_ptr = AllocTemp();
+ OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
+ RegStorage temp_value = AllocTemp();
+ OpRegCopy(temp_value, r_src);
+ OpRegCopy(TargetReg(kArg0), temp_ptr);
+ OpRegCopy(TargetReg(kArg1), temp_value);
+ FreeTemp(temp_ptr);
+ FreeTemp(temp_value);
+ RegStorage r_tgt = LoadHelper(kQuickA64Store);
+ return OpReg(kOpBlx, r_tgt);
+}
+
+void Mips64Mir2Lir::SpillCoreRegs() {
+ if (num_core_spills_ == 0) {
+ return;
+ }
+ uint32_t mask = core_spill_mask_;
+ // Start saving from offset 0 so that ra ends up on the top of the frame.
+ int offset = 0;
+ OpRegImm(kOpSub, rs_rSPd, num_core_spills_ * 8);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ StoreWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
+ offset += 8;
+ }
+ }
+}
+
+void Mips64Mir2Lir::UnSpillCoreRegs() {
+ if (num_core_spills_ == 0) {
+ return;
+ }
+ uint32_t mask = core_spill_mask_;
+ int offset = frame_size_ - num_core_spills_ * 8;
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ LoadWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
+ offset += 8;
+ }
+ }
+ OpRegImm(kOpAdd, rs_rSPd, frame_size_);
+}
+
+bool Mips64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
+ return (lir->opcode == kMips64B);
+}
+
+RegisterClass Mips64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
+ if (UNLIKELY(is_volatile)) {
+ // On Mips64, atomic 64-bit load/store requires a core register.
+ // Smaller aligned load/store is atomic for both core and fp registers.
+ if (size == k64 || size == kDouble) {
+ return kCoreReg;
+ }
+ }
+ // TODO: Verify that both core and fp registers are suitable for smaller sizes.
+ return RegClassBySize(size);
+}
+
+Mips64Mir2Lir::Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+ : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this) {
+ for (int i = 0; i < kMips64Last; i++) {
+ DCHECK_EQ(Mips64Mir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << Mips64Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(Mips64Mir2Lir::EncodingMap[i].opcode);
+ }
+}
+
+Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena) {
+ return new Mips64Mir2Lir(cu, mir_graph, arena);
+}
+
+uint64_t Mips64Mir2Lir::GetTargetInstFlags(int opcode) {
+ DCHECK(!IsPseudoLirOp(opcode));
+ return Mips64Mir2Lir::EncodingMap[opcode].flags;
+}
+
+const char* Mips64Mir2Lir::GetTargetInstName(int opcode) {
+ DCHECK(!IsPseudoLirOp(opcode));
+ return Mips64Mir2Lir::EncodingMap[opcode].name;
+}
+
+const char* Mips64Mir2Lir::GetTargetInstFmt(int opcode) {
+ DCHECK(!IsPseudoLirOp(opcode));
+ return Mips64Mir2Lir::EncodingMap[opcode].fmt;
+}
+
+void Mips64Mir2Lir::GenBreakpoint(int code) {
+ NewLIR1(kMips64Break, code);
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/mips64/utility_mips64.cc b/compiler/dex/quick/mips64/utility_mips64.cc
new file mode 100644
index 0000000000..38e354cbde
--- /dev/null
+++ b/compiler/dex/quick/mips64/utility_mips64.cc
@@ -0,0 +1,875 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_mips64.h"
+
+#include "arch/mips64/instruction_set_features_mips64.h"
+#include "base/logging.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "dex/reg_storage_eq.h"
+#include "driver/compiler_driver.h"
+#include "mips64_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the MIPS64 ISA. */
+
+LIR* Mips64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ int opcode;
+ // Must be both DOUBLE or both not DOUBLE.
+ DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
+ if (r_dest.Is64Bit()) {
+ if (r_dest.IsDouble()) {
+ if (r_src.IsDouble()) {
+ opcode = kMips64Fmovd;
+ } else {
+ // Note the operands are swapped for the dmtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMips64Dmtc1;
+ }
+ } else {
+ DCHECK(r_src.IsDouble());
+ opcode = kMips64Dmfc1;
+ }
+ } else {
+ if (r_dest.IsSingle()) {
+ if (r_src.IsSingle()) {
+ opcode = kMips64Fmovs;
+ } else {
+ // Note the operands are swapped for the mtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMips64Mtc1;
+ }
+ } else {
+ DCHECK(r_src.IsSingle());
+ opcode = kMips64Mfc1;
+ }
+ }
+ LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
+ if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
+ }
+ return res;
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+ // For encodings, see LoadConstantNoClobber below.
+ return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
+ UNUSED(value);
+ return false; // TUNING
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
+ return false; // TUNING
+}
+
+bool Mips64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
+ UNUSED(value);
+ return false; // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool. If target is
+ * a high register, build constant into a low register and copy.
+ *
+ * No additional register clobbering operation performed. Use this version when
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR* Mips64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
+ LIR *res;
+
+ RegStorage r_dest_save = r_dest;
+ int is_fp_reg = r_dest.IsFloat();
+ if (is_fp_reg) {
+ DCHECK(r_dest.IsSingle());
+ r_dest = AllocTemp();
+ }
+
+ // See if the value can be constructed cheaply.
+ if (value == 0) {
+ res = NewLIR2(kMips64Move, r_dest.GetReg(), rZERO);
+ } else if (IsUint<16>(value)) {
+ // Use OR with (unsigned) immediate to encode 16b unsigned int.
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZERO, value);
+ } else if (IsInt<16>(value)) {
+ // Use ADD with (signed) immediate to encode 16b signed int.
+ res = NewLIR3(kMips64Addiu, r_dest.GetReg(), rZERO, value);
+ } else {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ if (value & 0xffff)
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ }
+
+ if (is_fp_reg) {
+ NewLIR2(kMips64Mtc1, r_dest.GetReg(), r_dest_save.GetReg());
+ FreeTemp(r_dest);
+ }
+
+ return res;
+}
+
+LIR* Mips64Mir2Lir::OpUnconditionalBranch(LIR* target) {
+ LIR* res = NewLIR1(kMips64B, 0 /* offset to be patched during assembly */);
+ res->target = target;
+ return res;
+}
+
+LIR* Mips64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
+ Mips64OpCode opcode = kMips64Nop;
+ switch (op) {
+ case kOpBlx:
+ opcode = kMips64Jalr;
+ break;
+ case kOpBx:
+ return NewLIR2(kMips64Jalr, rZERO, r_dest_src.GetReg());
+ default:
+ LOG(FATAL) << "Bad case in OpReg";
+ }
+ return NewLIR2(opcode, rRAd, r_dest_src.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
+ LIR *res;
+ bool neg = (value < 0);
+ int abs_value = (neg) ? -value : value;
+ bool short_form = (abs_value & 0xff) == abs_value;
+ bool is64bit = r_dest_src1.Is64Bit();
+ RegStorage r_scratch;
+ Mips64OpCode opcode = kMips64Nop;
+ switch (op) {
+ case kOpAdd:
+ return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+ case kOpSub:
+ return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
+ default:
+ LOG(FATAL) << "Bad case in OpRegImm";
+ }
+ if (short_form) {
+ res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
+ } else {
+ if (is64bit) {
+ r_scratch = AllocTempWide();
+ res = LoadConstantWide(r_scratch, value);
+ } else {
+ r_scratch = AllocTemp();
+ res = LoadConstant(r_scratch, value);
+ }
+ if (op == kOpCmp) {
+ NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
+ } else {
+ NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
+ }
+ }
+ return res;
+}
+
+LIR* Mips64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest,
+ RegStorage r_src1, RegStorage r_src2) {
+ Mips64OpCode opcode = kMips64Nop;
+ bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit();
+
+ switch (op) {
+ case kOpAdd:
+ if (is64bit) {
+ opcode = kMips64Daddu;
+ } else {
+ opcode = kMips64Addu;
+ }
+ break;
+ case kOpSub:
+ if (is64bit) {
+ opcode = kMips64Dsubu;
+ } else {
+ opcode = kMips64Subu;
+ }
+ break;
+ case kOpAnd:
+ opcode = kMips64And;
+ break;
+ case kOpMul:
+ opcode = kMips64Mul;
+ break;
+ case kOpOr:
+ opcode = kMips64Or;
+ break;
+ case kOpXor:
+ opcode = kMips64Xor;
+ break;
+ case kOpLsl:
+ if (is64bit) {
+ opcode = kMips64Dsllv;
+ } else {
+ opcode = kMips64Sllv;
+ }
+ break;
+ case kOpLsr:
+ if (is64bit) {
+ opcode = kMips64Dsrlv;
+ } else {
+ opcode = kMips64Srlv;
+ }
+ break;
+ case kOpAsr:
+ if (is64bit) {
+ opcode = kMips64Dsrav;
+ } else {
+ opcode = kMips64Srav;
+ }
+ break;
+ case kOpAdc:
+ case kOpSbc:
+ LOG(FATAL) << "No carry bit on MIPS64";
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegRegReg";
+ break;
+ }
+ return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
+ LIR *res;
+ Mips64OpCode opcode = kMips64Nop;
+ bool short_form = true;
+ bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit();
+
+ switch (op) {
+ case kOpAdd:
+ if (is64bit) {
+ if (IS_SIMM16(value)) {
+ opcode = kMips64Daddiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Daddu;
+ }
+ } else {
+ if (IS_SIMM16(value)) {
+ opcode = kMips64Addiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Addu;
+ }
+ }
+ break;
+ case kOpSub:
+ if (is64bit) {
+ if (IS_SIMM16((-value))) {
+ value = -value;
+ opcode = kMips64Daddiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Dsubu;
+ }
+ } else {
+ if (IS_SIMM16((-value))) {
+ value = -value;
+ opcode = kMips64Addiu;
+ } else {
+ short_form = false;
+ opcode = kMips64Subu;
+ }
+ }
+ break;
+ case kOpLsl:
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsll;
+ } else {
+ opcode = kMips64Dsll32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMips64Sll;
+ }
+ break;
+ case kOpLsr:
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsrl;
+ } else {
+ opcode = kMips64Dsrl32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMips64Srl;
+ }
+ break;
+ case kOpAsr:
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsra;
+ } else {
+ opcode = kMips64Dsra32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMips64Sra;
+ }
+ break;
+ case kOpAnd:
+ if (IS_UIMM16((value))) {
+ opcode = kMips64Andi;
+ } else {
+ short_form = false;
+ opcode = kMips64And;
+ }
+ break;
+ case kOpOr:
+ if (IS_UIMM16((value))) {
+ opcode = kMips64Ori;
+ } else {
+ short_form = false;
+ opcode = kMips64Or;
+ }
+ break;
+ case kOpXor:
+ if (IS_UIMM16((value))) {
+ opcode = kMips64Xori;
+ } else {
+ short_form = false;
+ opcode = kMips64Xor;
+ }
+ break;
+ case kOpMul:
+ short_form = false;
+ opcode = kMips64Mul;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in OpRegRegImm";
+ break;
+ }
+
+ if (short_form) {
+ res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
+ } else {
+ if (r_dest != r_src1) {
+ res = LoadConstant(r_dest, value);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
+ } else {
+ if (is64bit) {
+ RegStorage r_scratch = AllocTempWide();
+ res = LoadConstantWide(r_scratch, value);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+ } else {
+ RegStorage r_scratch = AllocTemp();
+ res = LoadConstant(r_scratch, value);
+ NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
+ }
+ }
+ }
+ return res;
+}
+
+LIR* Mips64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
+ Mips64OpCode opcode = kMips64Nop;
+ LIR *res;
+ switch (op) {
+ case kOpMov:
+ opcode = kMips64Move;
+ break;
+ case kOpMvn:
+ return NewLIR3(kMips64Nor, r_dest_src1.GetReg(), r_src2.GetReg(), rZEROd);
+ case kOpNeg:
+ if (r_dest_src1.Is64Bit())
+ return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
+ else
+ return NewLIR3(kMips64Subu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+ case kOpAdd:
+ case kOpAnd:
+ case kOpMul:
+ case kOpOr:
+ case kOpSub:
+ case kOpXor:
+ return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
+ case kOp2Byte:
+ res = NewLIR2(kMips64Seb, r_dest_src1.GetReg(), r_src2.GetReg());
+ return res;
+ case kOp2Short:
+ res = NewLIR2(kMips64Seh, r_dest_src1.GetReg(), r_src2.GetReg());
+ return res;
+ case kOp2Char:
+ return NewLIR3(kMips64Andi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
+ default:
+ LOG(FATAL) << "Bad case in OpRegReg";
+ UNREACHABLE();
+ }
+ return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
+}
+
+LIR* Mips64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset,
+ RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc,
+ RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
+ LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
+ LIR *res = nullptr;
+ DCHECK(r_dest.Is64Bit());
+ RegStorage r_dest_save = r_dest;
+ int is_fp_reg = r_dest.IsFloat();
+ if (is_fp_reg) {
+ DCHECK(r_dest.IsDouble());
+ r_dest = AllocTemp();
+ }
+
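+ // lui/dahi sign-extend, so bit 31 is recorded here (bit 47 is handled later) to bump the
+ // next higher immediate and compensate for the resulting borrow.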
+ int bit31 = (value & UINT64_C(0x80000000)) != 0;
+
+ // Loads with 1 instruction.
+ if (IsUint<16>(value)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+ } else if (IsInt<16>(value)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
+ } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ } else if (IsInt<32>(value)) {
+ // Loads with 2 instructions.
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
+ } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
+ } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
+ (value >> 32) <= (32767 - bit31)) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
+ } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
+ } else {
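+ // None of the 1-2 instruction patterns matched: strip trailing zero bits, materialize the
+ // shifted constant, then shift it back into place.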
+ int64_t tmp = value;
+ int shift_cnt = 0;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<32>(tmp)) {
+ // Loads with 3 instructions.
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp >> 16);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else {
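+ // Still no match: try the same trick on value >> 16 (shift the result into place, then OR
+ // in the low halfword); otherwise fall back to the generic lui/ori/dahi/dati sequence.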
+ tmp = value >> 16;
+ shift_cnt = 16;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else {
+ // Loads with 3-4 instructions.
+ uint64_t tmp2 = value;
+ if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
+ res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp2 >> 16);
+ }
+ if ((tmp2 & 0xFFFF) != 0) {
+ if (res)
+ NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp2);
+ else
+ res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp2);
+ }
+ if (bit31) {
+ tmp2 += UINT64_C(0x100000000);
+ }
+ if (((tmp2 >> 32) & 0xFFFF) != 0) {
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
+ }
+ if (tmp2 & UINT64_C(0x800000000000)) {
+ tmp2 += UINT64_C(0x1000000000000);
+ }
+ if ((tmp2 >> 48) != 0) {
+ NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
+ }
+ }
+ }
+ }
+
+ if (is_fp_reg) {
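+ // The constant was built in a GPR temp; dmtc1 moves all 64 bits into the FP destination.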
+ NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
+ FreeTemp(r_dest);
+ }
+
+ return res;
+}
+
+/* Load value from base + scaled index. */
+LIR* Mips64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+ int scale, OpSize size) {
+ LIR *first = NULL;
+ LIR *res;
+ RegStorage t_reg;
+ Mips64OpCode opcode = kMips64Nop;
+ bool is64bit = r_dest.Is64Bit();
+ if (is64bit) {
+ t_reg = AllocTempWide();
+ } else {
+ t_reg = AllocTemp();
+ }
+
+ if (r_dest.IsFloat()) {
+ DCHECK(r_dest.IsSingle());
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
+ size = kSingle;
+ } else if (is64bit) {
+ size = k64;
+ } else {
+ if (size == kSingle)
+ size = k32;
+ }
+
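+ // Form the effective address base + (index << scale) in t_reg, then load from it with a
+ // zero displacement.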
+ if (!scale) {
+ if (is64bit) {
+ first = NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ } else {
+ first = NewLIR3(kMips64Addu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ }
+ } else {
+ first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+ NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+ }
+
+ switch (size) {
+ case k64:
+ opcode = kMips64Ld;
+ break;
+ case kSingle:
+ opcode = kMips64Flwc1;
+ break;
+ case k32:
+ case kReference:
+ opcode = kMips64Lw;
+ break;
+ case kUnsignedHalf:
+ opcode = kMips64Lhu;
+ break;
+ case kSignedHalf:
+ opcode = kMips64Lh;
+ break;
+ case kUnsignedByte:
+ opcode = kMips64Lbu;
+ break;
+ case kSignedByte:
+ opcode = kMips64Lb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexed";
+ }
+
+ res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
+ FreeTemp(t_reg);
+ return (first) ? first : res;
+}
+
+/* Store value to base + scaled index. */
+LIR* Mips64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
+ int scale, OpSize size) {
+ LIR *first = NULL;
+ Mips64OpCode opcode = kMips64Nop;
+ RegStorage t_reg = AllocTemp();
+
+ if (r_src.IsFloat()) {
+ DCHECK(r_src.IsSingle());
+ DCHECK((size == k32) || (size == kSingle) || (size == kReference));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = k32;
+ }
+
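+ // Same addressing as LoadBaseIndexed: form base + (index << scale) in t_reg before the store.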
+ if (!scale) {
+ first = NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+ } else {
+ first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+ NewLIR3(kMips64Daddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+ }
+
+ switch (size) {
+ case kSingle:
+ opcode = kMips64Fswc1;
+ break;
+ case k32:
+ case kReference:
+ opcode = kMips64Sw;
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMips64Sh;
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMips64Sb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in StoreBaseIndexed";
+ }
+ NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
+ return first;
+}
+
+// FIXME: don't split r_dest into 2 containers.
+LIR* Mips64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size) {
+/*
+ * Load value from base + displacement. Optionally perform null check
+ * on base (which must have an associated s_reg and MIR). If not
+ * performing null check, incoming MIR can be null. IMPORTANT: this
+ * code must not allocate any new temps. If a new register is needed
+ * and base and dest are the same, spill some other register to
+ * rlp and then restore.
+ */
+ LIR *res;
+ LIR *load = NULL;
+ Mips64OpCode opcode = kMips64Nop;
+ bool short_form = IS_SIMM16(displacement);
+
+ switch (size) {
+ case k64:
+ case kDouble:
+ r_dest = Check64BitReg(r_dest);
+ if (!r_dest.IsFloat())
+ opcode = kMips64Ld;
+ else
+ opcode = kMips64Fldc1;
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case k32:
+ case kSingle:
+ case kReference:
+ opcode = kMips64Lw;
+ if (r_dest.IsFloat()) {
+ opcode = kMips64Flwc1;
+ DCHECK(r_dest.IsSingle());
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ opcode = kMips64Lhu;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kSignedHalf:
+ opcode = kMips64Lh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ opcode = kMips64Lbu;
+ break;
+ case kSignedByte:
+ opcode = kMips64Lb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
+ }
+
+ if (short_form) {
+ load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
+ } else {
+ RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
+ res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+ load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
+ if (r_tmp != r_dest)
+ FreeTemp(r_tmp);
+ }
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, rs_rMIPS64_SP);
+ AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
+ }
+ return res;
+}
+
+LIR* Mips64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ OpSize size, VolatileKind is_volatile) {
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ displacement & 0x7)) {
+ // TODO: use lld/scd instructions for Mips64.
+ // Do atomic 64-bit load.
+ return GenAtomic64Load(r_base, displacement, r_dest);
+ }
+
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k64;
+ }
+ LIR* load;
+ load = LoadBaseDispBody(r_base, displacement, r_dest, size);
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ GenMemBarrier(kLoadAny);
+ }
+
+ return load;
+}
+
+// FIXME: don't split r_dest into 2 containers.
+LIR* Mips64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
+ LIR *res;
+ LIR *store = NULL;
+ Mips64OpCode opcode = kMips64Nop;
+ bool short_form = IS_SIMM16(displacement);
+
+ switch (size) {
+ case k64:
+ case kDouble:
+ r_src = Check64BitReg(r_src);
+ if (!r_src.IsFloat())
+ opcode = kMips64Sd;
+ else
+ opcode = kMips64Fsdc1;
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case k32:
+ case kSingle:
+ case kReference:
+ opcode = kMips64Sw;
+ if (r_src.IsFloat()) {
+ opcode = kMips64Fswc1;
+ DCHECK(r_src.IsSingle());
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ case kUnsignedHalf:
+ case kSignedHalf:
+ opcode = kMips64Sh;
+ DCHECK_EQ((displacement & 0x1), 0);
+ break;
+ case kUnsignedByte:
+ case kSignedByte:
+ opcode = kMips64Sb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in StoreBaseDispBody";
+ }
+
+ if (short_form) {
+ store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
+ } else {
+ RegStorage r_scratch = AllocTemp();
+ res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
+ store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
+ FreeTemp(r_scratch);
+ }
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, rs_rMIPS64_SP);
+ AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
+ }
+
+ return res;
+}
+
+LIR* Mips64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size, VolatileKind is_volatile) {
+ if (is_volatile == kVolatile) {
+ // Ensure that prior accesses become visible to other threads first.
+ GenMemBarrier(kAnyStore);
+ }
+
+ LIR* store;
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ displacement & 0x7)) {
+ // TODO: use lld/scd instructions for Mips64.
+ // Do atomic 64-bit store.
+ store = GenAtomic64Store(r_base, displacement, r_src);
+ } else {
+ // TODO: base this on target.
+ if (size == kWord) {
+ size = k64;
+ }
+ store = StoreBaseDispBody(r_base, displacement, r_src, size);
+ }
+
+ if (UNLIKELY(is_volatile == kVolatile)) {
+ // Preserve order with respect to any subsequent volatile loads.
+ // We need StoreLoad, but that generally requires the most expensive barrier.
+ GenMemBarrier(kAnyAny);
+ }
+
+ return store;
+}
+
+LIR* Mips64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
+ LOG(FATAL) << "Unexpected use of OpMem for MIPS64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+ UNUSED(cc, target);
+ LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS64";
+ UNREACHABLE();
+}
+
+LIR* Mips64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
+ return OpReg(op, r_tgt);
+}
+
+} // namespace art
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index fcf4716c1a..13a6d9d815 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -45,6 +45,7 @@
#include "dex/quick/arm/backend_arm.h"
#include "dex/quick/arm64/backend_arm64.h"
#include "dex/quick/mips/backend_mips.h"
+#include "dex/quick/mips64/backend_mips64.h"
#include "dex/quick/x86/backend_x86.h"
namespace art {
@@ -87,7 +88,17 @@ static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
(1 << kPromoteCompilerTemps) |
0,
// 7 = kMips64.
- ~0U
+ (1 << kLoadStoreElimination) |
+ (1 << kLoadHoisting) |
+ (1 << kSuppressLoads) |
+ (1 << kNullCheckElimination) |
+ (1 << kPromoteRegs) |
+ (1 << kTrackLiveTemps) |
+ (1 << kSafeOptimizations) |
+ (1 << kBBOpt) |
+ (1 << kMatch) |
+ (1 << kPromoteCompilerTemps) |
+ 0
};
static_assert(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t),
"kDisabledOpts unexpected");
@@ -119,7 +130,7 @@ static const char* kSupportedTypes[] = {
// 6 = kMips.
nullptr,
// 7 = kMips64.
- ""
+ nullptr
};
static_assert(sizeof(kSupportedTypes) == 8 * sizeof(char*), "kSupportedTypes unexpected");
@@ -430,7 +441,7 @@ static const int* kUnsupportedOpcodes[] = {
// 6 = kMips.
nullptr,
// 7 = kMips64.
- kAllOpcodes
+ nullptr
};
static_assert(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), "kUnsupportedOpcodes unexpected");
@@ -451,7 +462,7 @@ static const size_t kUnsupportedOpcodesSize[] = {
// 6 = kMips.
0,
// 7 = kMips64.
- arraysize(kAllOpcodes),
+ 0
};
static_assert(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
"kUnsupportedOpcodesSize unexpected");
@@ -624,12 +635,12 @@ CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
}
CompilationUnit cu(driver->GetArenaPool(), instruction_set, driver, class_linker);
- // TODO: Mips64 is not yet implemented.
CHECK((cu.instruction_set == kThumb2) ||
(cu.instruction_set == kArm64) ||
(cu.instruction_set == kX86) ||
(cu.instruction_set == kX86_64) ||
- (cu.instruction_set == kMips));
+ (cu.instruction_set == kMips) ||
+ (cu.instruction_set == kMips64));
// TODO: set this from command line
constexpr bool compiler_flip_match = false;
@@ -798,6 +809,9 @@ Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_
case kMips:
mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
+ case kMips64:
+ mir_to_lir = Mips64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
case kX86:
// Fall-through.
case kX86_64:
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 67fb8040f7..682fa281ac 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -1355,7 +1355,7 @@ RegLocation Mir2Lir::GetReturn(RegisterClass reg_class) {
default: res = LocCReturn(); break;
}
Clobber(res.reg);
- if (cu_->instruction_set == kMips) {
+ if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
MarkInUse(res.reg);
} else {
LockTemp(res.reg);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index df2b520b50..6d2ef158e2 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2184,8 +2184,10 @@ void CompilerDriver::CompileMethod(Thread* self, const DexFile::CodeItem* code_i
InstructionSetHasGenericJniStub(instruction_set_)) {
// Leaving this empty will trigger the generic JNI version
} else {
- compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
- CHECK(compiled_method != nullptr);
+ if (instruction_set_ != kMips64) { // Use generic JNI for Mips64 (temporarily).
+ compiled_method = compiler_->JniCompile(access_flags, method_idx, dex_file);
+ CHECK(compiled_method != nullptr);
+ }
}
} else if ((access_flags & kAccAbstract) != 0) {
// Abstract methods don't have code.
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 95c2d402b3..d25acc74e2 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -20,6 +20,7 @@
#include "jni/quick/arm/calling_convention_arm.h"
#include "jni/quick/arm64/calling_convention_arm64.h"
#include "jni/quick/mips/calling_convention_mips.h"
+#include "jni/quick/mips64/calling_convention_mips64.h"
#include "jni/quick/x86/calling_convention_x86.h"
#include "jni/quick/x86_64/calling_convention_x86_64.h"
#include "utils.h"
@@ -38,6 +39,8 @@ ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ case kMips64:
+ return new mips64::Mips64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86_64:
@@ -111,6 +114,8 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch
return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+ case kMips64:
+ return new mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
case kX86_64:
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index ba73828931..2d9e03a718 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -36,6 +36,7 @@
#include "utils/arm/managed_register_arm.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/mips/managed_register_mips.h"
+#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
#include "thread.h"
@@ -329,7 +330,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// 11. Save return value
FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
- if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
+ if ((instruction_set == kMips || instruction_set == kMips64) &&
+ main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
new file mode 100644
index 0000000000..17325d6d49
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_mips64.h"
+
+#include "base/logging.h"
+#include "handle_scope-inl.h"
+#include "utils/mips64/managed_register_mips64.h"
+
+namespace art {
+namespace mips64 {
+
+static const GpuRegister kGpuArgumentRegisters[] = {
+ A0, A1, A2, A3, A4, A5, A6, A7
+};
+
+static const FpuRegister kFpuArgumentRegisters[] = {
+ F12, F13, F14, F15, F16, F17, F18, F19
+};
+
+// Calling convention
+ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+ if (shorty[0] == 'F' || shorty[0] == 'D') {
+ return Mips64ManagedRegister::FromFpuRegister(F0);
+ } else if (shorty[0] == 'V') {
+ return Mips64ManagedRegister::NoRegister();
+ } else {
+ return Mips64ManagedRegister::FromGpuRegister(V0);
+ }
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(V0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(A0);
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return false; // Everything moved to stack on entry.
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ return true;
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ LOG(FATAL) << "Should not reach here";
+ return ManagedRegister::NoRegister();
+}
+
+FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ CHECK(IsCurrentParamOnStack());
+ FrameOffset result =
+ FrameOffset(displacement_.Int32Value() + // displacement
+ sizeof(StackReference<mirror::ArtMethod>) + // Method ref
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
+ return result;
+}
+
+const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
+ // We spill the argument registers on MIPS64 to free them up for scratch use;
+ // we then assume all arguments are on the stack.
+ if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+ int reg_index = 1; // We start from A1; A0 holds ArtMethod*.
+
+ // We need to choose the correct register size since the managed
+ // stack uses 32-bit stack slots.
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ if (reg_index < 8) {
+ if (IsCurrentParamAFloatOrDouble()) { // FP regs.
+ FpuRegister arg = kFpuArgumentRegisters[reg_index];
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
+ entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
+ } else { // GP regs.
+ GpuRegister arg = kGpuArgumentRegisters[reg_index];
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
+ entry_spills_.push_back(reg,
+ (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
+ }
+ // e.g. A1, A2, F3, A4, F5, F6, A7
+ reg_index++;
+ }
+
+ Next();
+ }
+ }
+ return entry_spills_;
+}
+
+// JNI calling convention
+
+Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S0));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S1));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
+
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(SP));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
+}
+
+uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
+ // Compute spill mask to agree with callee saves initialized in the constructor
+ uint32_t result = 0;
+ result = 1 << S0 | 1 << S1 | 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 |
+ 1 << S7 | 1 << GP | 1 << SP | 1 << S8;
+ return result;
+}
+
+ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
+ return Mips64ManagedRegister::FromGpuRegister(AT);
+}
+
+size_t Mips64JniCallingConvention::FrameSize() {
+ // Method* and callee save area size, local reference segment state
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ // Plus return value spill area size
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t Mips64JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
+ return itr_args_ < 8;
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
+ return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
+ CHECK(IsCurrentParamInRegister());
+ if (IsCurrentParamAFloatOrDouble()) {
+ return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
+ } else {
+ return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
+ }
+}
+
+FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
+ CHECK(IsCurrentParamOnStack());
+ size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
+ CHECK_LT(offset, OutArgSize());
+ return FrameOffset(offset);
+}
+
+size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
+ // all arguments including JNI args
+ size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+
+ // Nothing on the stack unless there are more than 8 arguments
+ return (all_args > 8) ? all_args - 8 : 0;
+}
+} // namespace mips64
+} // namespace art
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
new file mode 100644
index 0000000000..dc9273b92a
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips64 {
+
+constexpr size_t kFramePointerSize = 8;
+
+class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+ Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // Managed runtime calling convention
+ ManagedRegister MethodRegister() OVERRIDE;
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+ ManagedRegisterEntrySpills entry_spills_;
+
+ DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
+};
+
+class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+ explicit Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+ ~Mips64JniCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister IntReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // JNI calling convention
+ size_t FrameSize() OVERRIDE;
+ size_t OutArgSize() OVERRIDE;
+ const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+ return callee_save_regs_;
+ }
+ ManagedRegister ReturnScratchRegister() const OVERRIDE;
+ uint32_t CoreSpillMask() const OVERRIDE;
+ uint32_t FpSpillMask() const OVERRIDE {
+ return 0; // Floats aren't spilled in JNI down call
+ }
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+ // Mips64 does not need to extend small return types.
+ bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ return false;
+ }
+
+ protected:
+ size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+ // TODO: these values aren't unique and can be shared amongst instances
+ std::vector<ManagedRegister> callee_save_regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
+};
+
+} // namespace mips64
+} // namespace art
+
+#endif // ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8572f4d530..96919dcc08 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -873,14 +873,11 @@ class Dex2Oat FINAL {
// For R6, only interpreter mode is working.
// TODO: fix compiler for Mips32r6.
compiler_filter_string = "interpret-only";
- } else if (instruction_set_ == kMips64) {
- // For Mips64, can only compile in interpreter mode.
- // TODO: fix compiler for Mips64.
- compiler_filter_string = "interpret-only";
} else {
compiler_filter_string = "speed";
}
}
+
CHECK(compiler_filter_string != nullptr);
CompilerOptions::CompilerFilter compiler_filter = CompilerOptions::kDefaultCompilerFilter;
if (strcmp(compiler_filter_string, "verify-none") == 0) {
diff --git a/disassembler/disassembler_mips64.cc b/disassembler/disassembler_mips64.cc
index 2d3239f4f3..7b289d0cf3 100644
--- a/disassembler/disassembler_mips64.cc
+++ b/disassembler/disassembler_mips64.cc
@@ -43,7 +43,7 @@ static const uint32_t kCop1 = (17 << kOpcodeShift);
static const uint32_t kITypeMask = (0x3f << kOpcodeShift);
static const uint32_t kJTypeMask = (0x3f << kOpcodeShift);
static const uint32_t kRTypeMask = ((0x3f << kOpcodeShift) | (0x3f));
-static const uint32_t kSpecial2Mask = (0x3f << kOpcodeShift);
+static const uint32_t kSpecial0Mask = (0x3f << kOpcodeShift);
static const uint32_t kFpMask = kRTypeMask;
static const Mips64Instruction gMips64Instructions[] = {
@@ -58,24 +58,15 @@ static const Mips64Instruction gMips64Instructions[] = {
{ kRTypeMask, 4, "sllv", "DTS", },
{ kRTypeMask, 6, "srlv", "DTS", },
{ kRTypeMask, 7, "srav", "DTS", },
- { kRTypeMask, 8, "jr", "S", },
- // rd = 31 is implicit.
- { kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", },
+ { kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", }, // rd = 31 is implicit.
+ { kRTypeMask | (0x1f << 11), 9, "jr", "S", }, // rd = 0 is implicit.
{ kRTypeMask, 9, "jalr", "DS", }, // General case.
- { kRTypeMask | (0x1f << 6), 10, "movz", "DST", },
- { kRTypeMask | (0x1f << 6), 11, "movn", "DST", },
{ kRTypeMask, 12, "syscall", "", }, // TODO: code
{ kRTypeMask, 13, "break", "", }, // TODO: code
{ kRTypeMask, 15, "sync", "", }, // TODO: type
- { kRTypeMask, 16, "mfhi", "D", },
- { kRTypeMask, 17, "mthi", "S", },
- { kRTypeMask, 18, "mflo", "D", },
- { kRTypeMask, 19, "mtlo", "S", },
- { kRTypeMask, 24, "mult", "ST", },
- { kRTypeMask, 25, "multu", "ST", },
- { kRTypeMask, 26, "div", "ST", },
- { kRTypeMask, 27, "divu", "ST", },
- { kRTypeMask, 32, "add", "DST", },
+ { kRTypeMask, 20, "dsllv", "DTS", },
+ { kRTypeMask, 22, "dsrlv", "DTS", },
+ { kRTypeMask, 23, "dsrav", "DTS", },
{ kRTypeMask, 33, "addu", "DST", },
{ kRTypeMask, 34, "sub", "DST", },
{ kRTypeMask, 35, "subu", "DST", },
@@ -85,27 +76,37 @@ static const Mips64Instruction gMips64Instructions[] = {
{ kRTypeMask, 39, "nor", "DST", },
{ kRTypeMask, 42, "slt", "DST", },
{ kRTypeMask, 43, "sltu", "DST", },
- { kRTypeMask, 44, "dadd", "DST", },
{ kRTypeMask, 45, "daddu", "DST", },
{ kRTypeMask, 46, "dsub", "DST", },
{ kRTypeMask, 47, "dsubu", "DST", },
- // 0, 48, tge
- // 0, 49, tgeu
- // 0, 50, tlt
- // 0, 51, tltu
- // 0, 52, teq
- // 0, 54, tne
+ // TODO: seleqz, selnez
+ { kRTypeMask, 56, "dsll", "DTA", },
+ { kRTypeMask, 58, "dsrl", "DTA", },
+ { kRTypeMask, 59, "dsra", "DTA", },
+ { kRTypeMask, 60, "dsll32", "DTA", },
+ { kRTypeMask | (0x1f << 21), 62 | (1 << 21), "drotr32", "DTA", },
+ { kRTypeMask, 62, "dsrl32", "DTA", },
+ { kRTypeMask, 63, "dsra32", "DTA", },
- // SPECIAL2
- { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 2, "mul", "DST" },
- { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 32, "clz", "DS" },
- { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 36, "dclz", "DS" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 0, "madd", "ST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 1, "maddu", "ST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 2, "mul", "DST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 4, "msub", "ST" },
- { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 5, "msubu", "ST" },
- { kSpecial2Mask | 0x3f, (28 << kOpcodeShift) | 0x3f, "sdbbp", "" },
+ // SPECIAL0
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 24, "mul", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 24, "muh", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 25, "mulu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 25, "muhu", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 26, "div", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 26, "mod", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 27, "divu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 27, "modu", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 28, "dmul", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 28, "dmuh", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 29, "dmulu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 29, "dmuhu", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 30, "ddiv", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 30, "dmod", "DST" },
+ { kSpecial0Mask | 0x7ff, (2 << 6) | 31, "ddivu", "DST" },
+ { kSpecial0Mask | 0x7ff, (3 << 6) | 31, "dmodu", "DST" },
+ // TODO: [d]clz, [d]clo
+ // TODO: sdbbp
// J-type instructions.
{ kJTypeMask, 2 << kOpcodeShift, "j", "L" },
@@ -116,33 +117,31 @@ static const Mips64Instruction gMips64Instructions[] = {
{ kITypeMask, 5 << kOpcodeShift, "bne", "STB" },
{ kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (1 << 16), "bgez", "SB" },
{ kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (0 << 16), "bltz", "SB" },
- { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (2 << 16), "bltzl", "SB" },
- { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (16 << 16), "bltzal", "SB" },
- { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (18 << 16), "bltzall", "SB" },
{ kITypeMask | (0x1f << 16), 6 << kOpcodeShift | (0 << 16), "blez", "SB" },
{ kITypeMask | (0x1f << 16), 7 << kOpcodeShift | (0 << 16), "bgtz", "SB" },
+ { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (6 << 16), "dahi", "Si", },
+ { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (30 << 16), "dati", "Si", },
{ 0xffff0000, (4 << kOpcodeShift), "b", "B" },
{ 0xffff0000, (1 << kOpcodeShift) | (17 << 16), "bal", "B" },
- { kITypeMask, 8 << kOpcodeShift, "addi", "TSi", },
{ kITypeMask, 9 << kOpcodeShift, "addiu", "TSi", },
{ kITypeMask, 10 << kOpcodeShift, "slti", "TSi", },
{ kITypeMask, 11 << kOpcodeShift, "sltiu", "TSi", },
{ kITypeMask, 12 << kOpcodeShift, "andi", "TSi", },
{ kITypeMask, 13 << kOpcodeShift, "ori", "TSi", },
- { kITypeMask, 14 << kOpcodeShift, "ori", "TSi", },
- { kITypeMask, 15 << kOpcodeShift, "lui", "TI", },
-
- { kITypeMask, 24 << kOpcodeShift, "daddi", "TSi", },
+ { kITypeMask, 14 << kOpcodeShift, "xori", "TSi", },
+ { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "TI", },
+ { kITypeMask, 15 << kOpcodeShift, "aui", "TSI", },
{ kITypeMask, 25 << kOpcodeShift, "daddiu", "TSi", },
-
+ { kITypeMask, 29 << kOpcodeShift, "daui", "TSi", },
{ kITypeMask, 32u << kOpcodeShift, "lb", "TO", },
{ kITypeMask, 33u << kOpcodeShift, "lh", "TO", },
{ kITypeMask, 35u << kOpcodeShift, "lw", "TO", },
{ kITypeMask, 36u << kOpcodeShift, "lbu", "TO", },
{ kITypeMask, 37u << kOpcodeShift, "lhu", "TO", },
+ { kITypeMask, 39u << kOpcodeShift, "lwu", "TO", },
{ kITypeMask, 40u << kOpcodeShift, "sb", "TO", },
{ kITypeMask, 41u << kOpcodeShift, "sh", "TO", },
{ kITypeMask, 43u << kOpcodeShift, "sw", "TO", },
@@ -154,27 +153,31 @@ static const Mips64Instruction gMips64Instructions[] = {
{ kITypeMask, 63u << kOpcodeShift, "sd", "TO", },
// Floating point.
- { kFpMask, kCop1 | 0, "add", "fdst" },
- { kFpMask, kCop1 | 1, "sub", "fdst" },
- { kFpMask, kCop1 | 2, "mul", "fdst" },
- { kFpMask, kCop1 | 3, "div", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 4, "sqrt", "fdst" },
- { kFpMask | (0x1f << 16), kCop1 | 5, "abs", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 6, "mov", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 7, "neg", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 8, "round.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 9, "trunc.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 10, "ceil.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 11, "floor.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 12, "round.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 13, "trunc.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 14, "ceil.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 15, "floor.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 32, "cvt.s", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 33, "cvt.d", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 36, "cvt.w", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 37, "cvt.l", "fds" },
- { kFpMask | (0x1f << 16), kCop1 | 38, "cvt.ps", "fds" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x00 << 21), "mfc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x01 << 21), "dmfc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x04 << 21), "mtc1", "Td" },
+ { kFpMask | (0x1f << 21), kCop1 | (0x05 << 21), "dmtc1", "Td" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 0, "add", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 1, "sub", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 2, "mul", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 3, "div", "fadt" },
+ { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 4, "sqrt", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 5, "abs", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 6, "mov", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 7, "neg", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 8, "round.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 9, "trunc.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 10, "ceil.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 11, "floor.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 12, "round.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -216,6 +219,7 @@ static void DumpMips64(std::ostream& os, const uint8_t* instr_ptr) {
break;
case 'D': args << 'r' << rd; break;
case 'd': args << 'f' << rd; break;
+ case 'a': args << 'f' << sa; break;
case 'f': // Floating point "fmt".
{
size_t fmt = (instruction >> 21) & 0x7; // TODO: other fmts?
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 6f1b8261a3..8cb95f1ab6 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -422,20 +422,120 @@ ENTRY art_quick_do_long_jump
move $v1, $zero
END art_quick_do_long_jump
-UNIMPLEMENTED art_quick_deliver_exception
-UNIMPLEMENTED art_quick_throw_null_pointer_exception
-UNIMPLEMENTED art_quick_throw_div_zero
-UNIMPLEMENTED art_quick_throw_array_bounds
-UNIMPLEMENTED art_quick_throw_stack_overflow
-UNIMPLEMENTED art_quick_throw_no_such_method
+ /*
+ * Called by managed code, saves most registers (forms basis of long jump
+ * context) and passes the bottom of the stack.
+ * artDeliverExceptionFromCode will place the callee save Method* at
+ * the bottom of the thread. On entry $a0 holds Throwable*.
+ */
+ENTRY art_quick_deliver_exception
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artDeliverExceptionFromCode
+ jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_deliver_exception
+
+ /*
+ * Called by managed code to create and deliver a NullPointerException
+ */
+ .extern artThrowNullPointerExceptionFromCode
+ENTRY art_quick_throw_null_pointer_exception
+.Lart_quick_throw_null_pointer_exception_gp_set:
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowNullPointerExceptionFromCode
+ jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_null_pointer_exception
-UNIMPLEMENTED art_quick_invoke_interface_trampoline
-UNIMPLEMENTED art_quick_invoke_interface_trampoline_with_access_check
+ /*
+ * Called by managed code to create and deliver an ArithmeticException
+ */
+ .extern artThrowDivZeroFromCode
+ENTRY art_quick_throw_div_zero
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowDivZeroFromCode
+ jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_div_zero
-UNIMPLEMENTED art_quick_invoke_static_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_direct_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_super_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_virtual_trampoline_with_access_check
+ /*
+ * Called by managed code to create and deliver an
+ * ArrayIndexOutOfBoundsException
+ */
+ .extern artThrowArrayBoundsFromCode
+ENTRY art_quick_throw_array_bounds
+.Lart_quick_throw_array_bounds_gp_set:
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowArrayBoundsFromCode
+ jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_throw_array_bounds
+
+ /*
+ * Called by managed code to create and deliver a StackOverflowError.
+ */
+ .extern artThrowStackOverflowFromCode
+ENTRY art_quick_throw_stack_overflow
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowStackOverflowFromCode
+ jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_stack_overflow
+
+ /*
+ * Called by managed code to create and deliver a NoSuchMethodError.
+ */
+ .extern artThrowNoSuchMethodFromCode
+ENTRY art_quick_throw_no_such_method
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowNoSuchMethodFromCode
+ jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_throw_no_such_method
+
+ /*
+ * All generated callsites for interface invokes and invocation slow paths will load arguments
+ * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
+ * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
+ * stack and call the appropriate C helper.
+ * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
+ *
+ * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
+ * of the target Method* in $v0 and method->code_ in $v1.
+ *
+ * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * thread and we branch to another stub to deliver it.
+ *
+ * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
+ * pointing back to the original caller.
+ */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
+ lwu $a2, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE($sp) # pass caller Method*
+ move $a3, rSELF # pass Thread::Current
+ jal \cxx_name # (method_idx, this, caller, Thread*, $sp)
+ move $a4, $sp # pass $sp
+ move $a0, $v0 # save target Method*
+ move $t9, $v1 # save $v0->code_
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ beq $v0, $zero, 1f
+ nop
+ jalr $zero, $t9
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END \c_name
+.endm
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
# On entry:
# t0 = shorty
@@ -454,7 +554,7 @@ UNIMPLEMENTED art_quick_invoke_virtual_trampoline_with_access_check
li $t9, 74 # put char 'J' into t9
beq $t9, $t3, 3f # branch if result type char == 'J'
nop
- lwu $\gpu, 0($t1)
+ lw $\gpu, 0($t1)
sw $\gpu, 0($v0)
daddiu $v0, 4
daddiu $t1, 4
@@ -699,63 +799,534 @@ call_sfn:
sw $v1, 4($a4) # store the other half of the result
END art_quick_invoke_static_stub
+ /*
+ * Entry from managed code that calls artHandleFillArrayDataFromCode and
+ * delivers exception on failure.
+ */
+ .extern artHandleFillArrayDataFromCode
+ENTRY art_quick_handle_fill_data
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_handle_fill_data
+
+ /*
+ * Entry from managed code that calls artLockObjectFromCode, may block for GC.
+ */
+ .extern artLockObjectFromCode
+ENTRY art_quick_lock_object
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ jal artLockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_lock_object
+
+ /*
+ * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
+ */
+ .extern artUnlockObjectFromCode
+ENTRY art_quick_unlock_object
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ jal artUnlockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_unlock_object
+
+ /*
+ * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ */
+ .extern artThrowClassCastException
+ENTRY art_quick_check_cast
+ daddiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sd $ra, 24($sp)
+ .cfi_rel_offset 31, 24
+ sd $t9, 16($sp)
+ sd $a1, 8($sp)
+ sd $a0, 0($sp)
+ jal artIsAssignableFromCode
+ nop
+ beq $v0, $zero, .Lthrow_class_cast_exception
+ ld $ra, 24($sp)
+ jalr $zero, $ra
+ daddiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+.Lthrow_class_cast_exception:
+ ld $t9, 16($sp)
+ ld $a1, 8($sp)
+ ld $a0, 0($sp)
+ daddiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowClassCastException
+ jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_check_cast
+
+ /*
+ * Entry from managed code for array put operations of objects where the value being stored
+ * needs to be checked for compatibility.
+ * a0 = array, a1 = index, a2 = value
+ */
+ENTRY art_quick_aput_obj_with_null_and_bound_check
+ bne $a0, $zero, .Lart_quick_aput_obj_with_bound_check_gp_set
+ nop
+ b .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+END art_quick_aput_obj_with_null_and_bound_check
+ENTRY art_quick_aput_obj_with_bound_check
+ lwu $t0, MIRROR_ARRAY_LENGTH_OFFSET($a0)
+ sltu $t1, $a1, $t0
+ bne $t1, $zero, .Lart_quick_aput_obj_gp_set
+ nop
+ move $a0, $a1
+ b .Lart_quick_throw_array_bounds_gp_set
+ move $a1, $t0
+END art_quick_aput_obj_with_bound_check
-UNIMPLEMENTED art_quick_handle_fill_data
-UNIMPLEMENTED art_quick_lock_object
-UNIMPLEMENTED art_quick_unlock_object
-UNIMPLEMENTED art_quick_check_cast
-UNIMPLEMENTED art_quick_aput_obj_with_null_and_bound_check
-UNIMPLEMENTED art_quick_aput_obj_with_bound_check
-UNIMPLEMENTED art_quick_aput_obj
-UNIMPLEMENTED art_quick_initialize_static_storage
-UNIMPLEMENTED art_quick_initialize_type
-UNIMPLEMENTED art_quick_initialize_type_and_verify_access
-UNIMPLEMENTED art_quick_get_boolean_static
-UNIMPLEMENTED art_quick_get_byte_static
-UNIMPLEMENTED art_quick_get_char_static
-UNIMPLEMENTED art_quick_get_short_static
-UNIMPLEMENTED art_quick_get32_static
-UNIMPLEMENTED art_quick_get64_static
-UNIMPLEMENTED art_quick_get_obj_static
-UNIMPLEMENTED art_quick_get_boolean_instance
-UNIMPLEMENTED art_quick_get_byte_instance
-UNIMPLEMENTED art_quick_get_char_instance
-UNIMPLEMENTED art_quick_get_short_instance
-UNIMPLEMENTED art_quick_get32_instance
-UNIMPLEMENTED art_quick_get64_instance
-UNIMPLEMENTED art_quick_get_obj_instance
-UNIMPLEMENTED art_quick_set8_static
-UNIMPLEMENTED art_quick_set16_static
-UNIMPLEMENTED art_quick_set32_static
-UNIMPLEMENTED art_quick_set64_static
-UNIMPLEMENTED art_quick_set_obj_static
-UNIMPLEMENTED art_quick_set8_instance
-UNIMPLEMENTED art_quick_set16_instance
-UNIMPLEMENTED art_quick_set32_instance
-UNIMPLEMENTED art_quick_set64_instance
-UNIMPLEMENTED art_quick_set_obj_instance
-UNIMPLEMENTED art_quick_resolve_string
+ENTRY art_quick_aput_obj
+ beq $a2, $zero, .Ldo_aput_null
+ nop
+ lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
+ lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
+ lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
+ bne $t1, $t0, .Lcheck_assignability # slow path if value's type != array's component type; fall through when trivially assignable
+ nop
+.Ldo_aput:
+ dsll $a1, $a1, 2
+ daddu $t0, $a0, $a1
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
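+ # Mark the GC card for the destination array: store the card-table base byte at
+ # card_table_base + (array >> 7).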
+ ld $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
+ dsrl $t1, $a0, 7
+ daddu $t1, $t1, $t0
+ sb $t0, ($t1)
+ jalr $zero, $ra
+ nop
+.Ldo_aput_null:
+ dsll $a1, $a1, 2
+ daddu $t0, $a0, $a1
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
+ jalr $zero, $ra
+ nop
+.Lcheck_assignability:
+ daddiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sd $ra, 56($sp)
+ .cfi_rel_offset 31, 56
+ sd $t9, 24($sp)
+ sd $a2, 16($sp)
+ sd $a1, 8($sp)
+ sd $a0, 0($sp)
+ move $a1, $t1
+ move $a0, $t0
+ jal artIsAssignableFromCode # (Class*, Class*)
+ nop
+ ld $ra, 56($sp)
+ ld $t9, 24($sp)
+ ld $a2, 16($sp)
+ ld $a1, 8($sp)
+ ld $a0, 0($sp)
+ daddiu $sp, $sp, 64
+ .cfi_adjust_cfa_offset -64
+ bne $v0, $zero, .Ldo_aput
+ nop
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a1, $a2
+ dla $t9, artThrowArrayStoreException
+ jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_aput_obj
+
+ /*
+ * Entry from managed code when static storage is uninitialized; this stub will run the class
+ * initializer and deliver the exception on error. On success the static storage base is
+ * returned.
+ */
+ .extern artInitializeStaticStorageFromCode
+ENTRY art_quick_initialize_static_storage
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeStaticStorageFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_static_storage
+
+ /*
+ * Entry from managed code when dex cache misses for a type_idx.
+ */
+ .extern artInitializeTypeFromCode
+ENTRY art_quick_initialize_type
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeTypeFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type
+
+ /*
+ * Entry from managed code when type_idx needs to be checked for access and dex cache may also
+ * miss.
+ */
+ .extern artInitializeTypeAndVerifyAccessFromCode
+ENTRY art_quick_initialize_type_and_verify_access
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeTypeAndVerifyAccessFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type_and_verify_access
+
+ /*
+ * Called by managed code to resolve a static field and load a boolean primitive value.
+ */
+ .extern artGetBooleanStaticFromCode
+ENTRY art_quick_get_boolean_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_static
+
+ /*
+ * Called by managed code to resolve a static field and load a byte primitive value.
+ */
+ .extern artGetByteStaticFromCode
+ENTRY art_quick_get_byte_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_static
+
+ /*
+ * Called by managed code to resolve a static field and load a char primitive value.
+ */
+ .extern artGetCharStaticFromCode
+ENTRY art_quick_get_char_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_static
+
+ /*
+ * Called by managed code to resolve a static field and load a short primitive value.
+ */
+ .extern artGetShortStaticFromCode
+ENTRY art_quick_get_short_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_static
+
+ /*
+ * Called by managed code to resolve a static field and load a 32-bit primitive value.
+ */
+ .extern artGet32StaticFromCode
+ENTRY art_quick_get32_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_static
+
+ /*
+ * Called by managed code to resolve a static field and load a 64-bit primitive value.
+ */
+ .extern artGet64StaticFromCode
+ENTRY art_quick_get64_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_static
+
+ /*
+ * Called by managed code to resolve a static field and load an object reference.
+ */
+ .extern artGetObjStaticFromCode
+ENTRY art_quick_get_obj_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_static
+
+ /*
+ * Called by managed code to resolve an instance field and load a boolean primitive value.
+ */
+ .extern artGetBooleanInstanceFromCode
+ENTRY art_quick_get_boolean_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a byte primitive value.
+ */
+ .extern artGetByteInstanceFromCode
+ENTRY art_quick_get_byte_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a char primitive value.
+ */
+ .extern artGetCharInstanceFromCode
+ENTRY art_quick_get_char_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a short primitive value.
+ */
+ .extern artGetShortInstanceFromCode
+ENTRY art_quick_get_short_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+ */
+ .extern artGet32InstanceFromCode
+ENTRY art_quick_get32_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+ */
+ .extern artGet64InstanceFromCode
+ENTRY art_quick_get64_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load an object reference.
+ */
+ .extern artGetObjInstanceFromCode
+ENTRY art_quick_get_obj_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_instance
+
+ /*
+ * Called by managed code to resolve a static field and store an 8-bit primitive value.
+ */
+ .extern artSet8StaticFromCode
+ENTRY art_quick_set8_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set8_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 16-bit primitive value.
+ */
+ .extern artSet16StaticFromCode
+ENTRY art_quick_set16_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set16_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 32-bit primitive value.
+ */
+ .extern artSet32StaticFromCode
+ENTRY art_quick_set32_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set32_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 64-bit primitive value.
+ */
+ .extern artSet64StaticFromCode
+ENTRY art_quick_set64_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, $a1 # pass new_val
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set64_static
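+ # Note: this is the one static setter that must shuffle registers: managed code passes
+ # (field_idx, new_val) in $a0/$a1, but artSet64StaticFromCode takes (field_idx, referrer,
+ # new_val, Thread*), so new_val is moved to $a2 before the referrer overwrites $a1.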
+
+ /*
+ * Called by managed code to resolve a static field and store an object reference.
+ */
+ .extern artSetObjStaticFromCode
+ENTRY art_quick_set_obj_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set_obj_static
+
+ /*
+ * Called by managed code to resolve an instance field and store an 8-bit primitive value.
+ */
+ .extern artSet8InstanceFromCode
+ENTRY art_quick_set8_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set8_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 16-bit primitive value.
+ */
+ .extern artSet16InstanceFromCode
+ENTRY art_quick_set16_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set16_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+ */
+ .extern artSet32InstanceFromCode
+ENTRY art_quick_set32_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set32_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+ */
+ .extern artSet64InstanceFromCode
+ENTRY art_quick_set64_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set64_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store an object reference.
+ */
+ .extern artSetObjInstanceFromCode
+ENTRY art_quick_set_obj_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set_obj_instance
+
+ /*
+ * Entry from managed code to resolve a string; this stub will allocate a String and deliver an
+ * exception on error. On success the String is returned. $a0 holds the referring method and
+ * $a1 holds the string index. The fast-path check for a hit in the strings cache has already
+ * been performed.
+ */
+ .extern artResolveStringFromCode
+ENTRY art_quick_resolve_string
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*)
+ jal artResolveStringFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_resolve_string
// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
ENTRY \name
- break
- break
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a2, rSELF # pass Thread::Current
+ \return
END \name
.endm
.macro THREE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
ENTRY \name
- break
- break
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a3, rSELF # pass Thread::Current
+ \return
END \name
.endm
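+// Note: both downcall macros pass the caller's arguments through unchanged ($a0-$a1 for
+// TWO_ARG, $a0-$a2 for THREE_ARG) and append Thread::Current as the final argument from the
+// jal delay slot, matching the entrypoint signatures used above.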
// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
-UNIMPLEMENTED art_quick_test_suspend
+ /*
+ * Called by managed code when the value in rSUSPEND has been decremented to 0.
+ */
+ .extern artTestSuspendFromCode
+ENTRY art_quick_test_suspend
+ lh $a0, THREAD_FLAGS_OFFSET(rSELF)           # load Thread's flags
+ bne $a0, $zero, 1f                            # any flag set? take the suspend-check slow path
+ daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # delay slot: reset rSUSPEND to SUSPEND_CHECK_INTERVAL on both paths
+ jalr $zero, $ra                               # fast path: return to the caller
+ nop
+1:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ jal artTestSuspendFromCode # (Thread*)
+ move $a0, rSELF
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_test_suspend
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
@@ -779,7 +1350,19 @@ ENTRY art_quick_proxy_invoke_handler
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
-UNIMPLEMENTED art_quick_imt_conflict_trampoline
+ /*
+ * Called to resolve an IMT conflict. $t0 is a hidden argument that holds the target method's
+ * dex method index.
+ */
+ENTRY art_quick_imt_conflict_trampoline
+ lwu $a0, 0($sp) # load caller Method*
+ lwu $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
+ dsll $t0, 2 # convert target method index to a byte offset
+ daddu $a0, $t0 # get address of target method
+ dla $t9, art_quick_invoke_interface_trampoline
+ jalr $zero, $t9
+ lwu $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
+END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
@@ -930,6 +1513,18 @@ art_quick_instrumentation_exit:
.cfi_adjust_cfa_offset -(16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
END art_quick_instrumentation_exit
-UNIMPLEMENTED art_quick_deoptimize
+ /*
+ * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimize
+ .extern artEnterInterpreterFromDeoptimize
+ENTRY art_quick_deoptimize
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ jal artDeoptimize # artDeoptimize(Thread*)
+ move $a0, rSELF # delay slot: pass Thread::Current
+END art_quick_deoptimize
+
UNIMPLEMENTED art_quick_indexof
UNIMPLEMENTED art_quick_string_compareto
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 00251ffacc..70ee04246a 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -154,8 +154,6 @@ class QuickArgumentVisitor {
// | F7 | f_arg7
// | F6 | f_arg6
// | F5 | f_arg5
- // | F6 | f_arg6
- // | F5 | f_arg5
// | F4 | f_arg4
// | F3 | f_arg3
// | F2 | f_arg2