Diffstat (limited to 'compiler')
-rw-r--r-- compiler/Android.mk 7
-rw-r--r-- compiler/common_compiler_test.cc 4
-rw-r--r-- compiler/common_compiler_test.h 1
-rw-r--r-- compiler/dex/quick/mips/assemble_mips.cc 427
-rw-r--r-- compiler/dex/quick/mips/call_mips.cc 228
-rw-r--r-- compiler/dex/quick/mips/codegen_mips.h 483
-rw-r--r-- compiler/dex/quick/mips/fp_mips.cc 79
-rw-r--r-- compiler/dex/quick/mips/int_mips.cc 453
-rw-r--r-- compiler/dex/quick/mips/mips_lir.h 594
-rw-r--r-- compiler/dex/quick/mips/target_mips.cc 658
-rw-r--r-- compiler/dex/quick/mips/utility_mips.cc 485
-rw-r--r-- compiler/dex/quick/mips64/assemble_mips64.cc 898
-rw-r--r-- compiler/dex/quick/mips64/backend_mips64.h 32
-rw-r--r-- compiler/dex/quick/mips64/call_mips64.cc 421
-rw-r--r-- compiler/dex/quick/mips64/codegen_mips64.h 328
-rw-r--r-- compiler/dex/quick/mips64/fp_mips64.cc 253
-rw-r--r-- compiler/dex/quick/mips64/int_mips64.cc 692
-rw-r--r-- compiler/dex/quick/mips64/mips64_lir.h 648
-rw-r--r-- compiler/dex/quick/mips64/target_mips64.cc 653
-rw-r--r-- compiler/dex/quick/mips64/utility_mips64.cc 875
-rw-r--r-- compiler/dex/quick/quick_compiler.cc 6
-rw-r--r-- compiler/dex/quick_compiler_callbacks.h 5
-rw-r--r-- compiler/driver/compiler_driver_test.cc 5
-rw-r--r-- compiler/dwarf/debug_frame_opcode_writer.h 282
-rw-r--r-- compiler/dwarf/debug_frame_writer.h 96
-rw-r--r-- compiler/dwarf/debug_line_opcode_writer.h 243
-rw-r--r-- compiler/dwarf/debug_line_writer.h 87
-rw-r--r-- compiler/dwarf/dwarf_test.cc 235
-rw-r--r-- compiler/dwarf/dwarf_test.h 220
-rw-r--r-- compiler/dwarf/register.h 58
-rw-r--r-- compiler/dwarf/writer.h 159
-rw-r--r-- compiler/elf_writer_test.cc 4
-rw-r--r-- compiler/jit/jit_compiler.cc 3
-rw-r--r-- compiler/oat_test.cc 2
-rw-r--r-- compiler/optimizing/boolean_simplifier.cc 10
-rw-r--r-- compiler/optimizing/code_generator_arm.cc 99
-rw-r--r-- compiler/optimizing/code_generator_x86.cc 2
-rw-r--r-- compiler/optimizing/inliner.cc 3
-rw-r--r-- compiler/optimizing/intrinsics.cc 6
-rw-r--r-- compiler/optimizing/intrinsics_arm.cc 4
-rw-r--r-- compiler/optimizing/intrinsics_arm64.cc 4
-rw-r--r-- compiler/optimizing/intrinsics_list.h 2
-rw-r--r-- compiler/optimizing/intrinsics_x86.cc 4
-rw-r--r-- compiler/optimizing/intrinsics_x86_64.cc 4
-rw-r--r-- compiler/optimizing/optimizing_compiler.cc 9
-rw-r--r-- compiler/optimizing/optimizing_compiler_stats.h 18
-rw-r--r-- compiler/optimizing/parallel_move_resolver.cc 58
-rw-r--r-- compiler/optimizing/parallel_move_test.cc 27
-rw-r--r-- compiler/optimizing/register_allocator.cc 83
-rw-r--r-- compiler/optimizing/register_allocator.h 7
-rw-r--r-- compiler/optimizing/ssa_liveness_analysis.cc 2
-rw-r--r-- compiler/optimizing/ssa_liveness_analysis.h 19
-rw-r--r-- compiler/optimizing/stack_map_stream.h 173
-rw-r--r-- compiler/optimizing/stack_map_test.cc 333
54 files changed, 4342 insertions(+), 6149 deletions(-)
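
Note: as the diffstat shows, this change deletes the separate mips64 Quick backend (compiler/dex/quick/mips64/) and folds its functionality into the mips backend, dispatching on pointer width via cu_->target64. A minimal sketch of that recurring pattern, with a simplified stand-in for art::CompilationUnit (the real logic lives in MipsMir2Lir methods such as GenEntrySequence, shown in the call_mips.cc hunks below):

    // Stand-in type for the sketch; only the flag the pattern needs.
    struct CompilationUnit { bool target64; };

    // Mirrors GenEntrySequence: spill slots are pointer-sized, so the frame
    // adjustment scales with the target word size instead of hard-coding 4.
    int FrameSub(const CompilationUnit* cu, int frame_size, int spill_count) {
      const int ptr_size = cu->target64 ? 8 : 4;
      return frame_size - spill_count * ptr_size;
    }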
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 0247c9d62c..904f117a5a 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -48,12 +48,6 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/mips/int_mips.cc \
dex/quick/mips/target_mips.cc \
dex/quick/mips/utility_mips.cc \
- dex/quick/mips64/assemble_mips64.cc \
- dex/quick/mips64/call_mips64.cc \
- dex/quick/mips64/fp_mips64.cc \
- dex/quick/mips64/int_mips64.cc \
- dex/quick/mips64/target_mips64.cc \
- dex/quick/mips64/utility_mips64.cc \
dex/quick/mir_to_lir.cc \
dex/quick/quick_compiler.cc \
dex/quick/ralloc_util.cc \
@@ -163,7 +157,6 @@ LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
dex/quick/arm/arm_lir.h \
dex/quick/arm64/arm64_lir.h \
dex/quick/mips/mips_lir.h \
- dex/quick/mips64/mips64_lir.h \
dex/quick/resource_mask.h \
dex/compiler_enums.h \
dex/global_value_numbering.h \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index dab28bcacd..96d90bb443 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -179,8 +179,8 @@ void CommonCompilerTest::SetUpRuntimeOptions(RuntimeOptions* options) {
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
- method_inliner_map_.get()));
- options->push_back(std::make_pair("compilercallbacks", callbacks_.get()));
+ method_inliner_map_.get(),
+ CompilerCallbacks::CallbackMode::kCompileApp));
}
void CommonCompilerTest::TearDown() {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 9cffbc86f3..d7b210d571 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -78,7 +78,6 @@ class CommonCompilerTest : public CommonRuntimeTest {
std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<VerificationResults> verification_results_;
std::unique_ptr<DexFileToMethodInlinerMap> method_inliner_map_;
- std::unique_ptr<CompilerCallbacks> callbacks_;
std::unique_ptr<CompilerDriver> compiler_driver_;
std::unique_ptr<CumulativeLogger> timer_;
std::unique_ptr<const InstructionSetFeatures> instruction_set_features_;
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index ed72d676b7..936ff42c8c 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -77,7 +77,7 @@ namespace art {
*
* [!] escape. To insert "!", use "!!"
*/
-/* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
+/* NOTE: must be kept in sync with enum MipsOpcode from mips_lir.h */
/*
* TUNING: We're currently punting on the branch delay slots. All branch
* instructions in this map are given a size of 8, which during assembly
@@ -85,6 +85,7 @@ namespace art {
* an assembler pass to fill those slots when possible.
*/
const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
+ // The following are common mips32r2, mips32r6 and mips64r6 instructions.
ENCODING_MAP(kMips32BitData, 0x00000000,
kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP,
@@ -117,7 +118,7 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMipsBeqz, 0x10000000, /* same as beq above with t = $zero */
+ ENCODING_MAP(kMipsBeqz, 0x10000000, // Same as beq above with t = $zero.
kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
@@ -137,7 +138,7 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMipsBnez, 0x14000000, /* same as bne below with t = $zero */
+ ENCODING_MAP(kMipsBnez, 0x14000000, // Same as bne below with t = $zero.
kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
@@ -145,14 +146,98 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMipsDiv, 0x0000001a,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
- "div", "!0r,!1r", 4),
ENCODING_MAP(kMipsExt, 0x7c000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
"ext", "!0r,!1r,!2d,!3D", 4),
+ ENCODING_MAP(kMipsFaddd, 0x46200000,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFadds, 0x46000000,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "add.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFsubd, 0x46200001,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFsubs, 0x46000001,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "sub.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFdivd, 0x46200003,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFdivs, 0x46000003,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "div.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFmuld, 0x46200002,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.d", "!0S,!1S,!2S", 4),
+ ENCODING_MAP(kMipsFmuls, 0x46000002,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "mul.s", "!0s,!1s,!2s", 4),
+ ENCODING_MAP(kMipsFcvtsd, 0x46200020,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMipsFcvtsw, 0x46800020,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.s.w", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFcvtds, 0x46000021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.s", "!0S,!1s", 4),
+ ENCODING_MAP(kMipsFcvtdw, 0x46800021,
+ kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.d.w", "!0S,!1s", 4),
+ ENCODING_MAP(kMipsFcvtwd, 0x46200024,
+ kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.d", "!0s,!1S", 4),
+ ENCODING_MAP(kMipsFcvtws, 0x46000024,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "cvt.w.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFmovd, 0x46200006,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMipsFmovs, 0x46000006,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mov.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFnegd, 0x46200007,
+ kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.d", "!0S,!1S", 4),
+ ENCODING_MAP(kMipsFnegs, 0x46000007,
+ kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "neg.s", "!0s,!1s", 4),
+ ENCODING_MAP(kMipsFldc1, 0xd4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "ldc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFlwc1, 0xc4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
+ "lwc1", "!0s,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFsdc1, 0xf4000000,
+ kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "sdc1", "!0S,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsFswc1, 0xe4000000,
+ kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
+ "swc1", "!0s,!1d(!2r)", 4),
ENCODING_MAP(kMipsJal, 0x0c000000,
kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -197,31 +282,31 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
"lw", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMipsMfhi, 0x00000010,
- kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_HI,
- "mfhi", "!0r", 4),
- ENCODING_MAP(kMipsMflo, 0x00000012,
- kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_LO,
- "mflo", "!0r", 4),
- ENCODING_MAP(kMipsMove, 0x00000025, /* or using zero reg */
+ ENCODING_MAP(kMipsMove, 0x00000025, // Or using zero reg.
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"move", "!0r,!1r", 4),
- ENCODING_MAP(kMipsMovz, 0x0000000a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "movz", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMipsMul, 0x70000002,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsMfc1, 0x44000000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsMtc1, 0x44800000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "mtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsMfhc1, 0x44600000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
+ "mfhc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMipsMthc1, 0x44e00000,
+ kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "mthc1", "!0r,!1s", 4),
ENCODING_MAP(kMipsNop, 0x00000000,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND,
"nop", ";", 4),
- ENCODING_MAP(kMipsNor, 0x00000027, /* used for "not" too */
+ ENCODING_MAP(kMipsNor, 0x00000027, // Used for "not" too.
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"nor", "!0r,!1r,!2r", 4),
@@ -289,7 +374,7 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"srlv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMipsSubu, 0x00000023, /* used for "neg" too */
+ ENCODING_MAP(kMipsSubu, 0x00000023, // Used for "neg" too.
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"subu", "!0r,!1r,!2r", 4),
@@ -297,6 +382,10 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
"sw", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMipsSync, 0x0000000f,
+ kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP,
+ "sync", ";", 4),
ENCODING_MAP(kMipsXor, 0x00000026,
kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
@@ -305,103 +394,143 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
"xori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMipsFadds, 0x46000000,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+
+ // The following are mips32r2 instructions.
+ ENCODING_MAP(kMipsR2Div, 0x0000001a,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
+ "div", "!0r,!1r", 4),
+ ENCODING_MAP(kMipsR2Mul, 0x70000002,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFsubs, 0x46000001,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ "mul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsR2Mfhi, 0x00000010,
+ kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_HI,
+ "mfhi", "!0r", 4),
+ ENCODING_MAP(kMipsR2Mflo, 0x00000012,
+ kFmtBitBlt, 15, 11, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_UNARY_OP | REG_DEF0 | REG_USE_LO,
+ "mflo", "!0r", 4),
+ ENCODING_MAP(kMipsR2Movz, 0x0000000a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFmuls, 0x46000002,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ "movz", "!0r,!1r,!2r", 4),
+
+ // The following are mips32r6 and mips64r6 instructions.
+ ENCODING_MAP(kMipsR6Div, 0x0000009a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFdivs, 0x46000003,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
+ "div", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsR6Mod, 0x000000da,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMipsFaddd, 0x46200000,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "mod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMipsR6Mul, 0x00000098,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFsubd, 0x46200001,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "mul", "!0r,!1r,!2r", 4),
+
+ // The following are mips64r6 instructions.
+ ENCODING_MAP(kMips64Daddiu, 0x64000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Daddu, 0x0000002d,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFmuld, 0x46200002,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "daddu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dahi, 0x04060000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dahi", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Dati, 0x041E0000,
+ kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
+ "dati", "!0r,0x!1h(!1d)", 4),
+ ENCODING_MAP(kMips64Daui, 0x74000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "daui", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Ddiv, 0x0000009e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFdivd, 0x46200003,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
+ "ddiv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmod, 0x000000de,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMipsFcvtsd, 0x46200020,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.d", "!0s,!1S", 4),
- ENCODING_MAP(kMipsFcvtsw, 0x46800020,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.w", "!0s,!1s", 4),
- ENCODING_MAP(kMipsFcvtds, 0x46000021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.s", "!0S,!1s", 4),
- ENCODING_MAP(kMipsFcvtdw, 0x46800021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.w", "!0S,!1s", 4),
- ENCODING_MAP(kMipsFcvtws, 0x46000024,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.s", "!0s,!1s", 4),
- ENCODING_MAP(kMipsFcvtwd, 0x46200024,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.d", "!0s,!1S", 4),
- ENCODING_MAP(kMipsFmovs, 0x46000006,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.s", "!0s,!1s", 4),
- ENCODING_MAP(kMipsFmovd, 0x46200006,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ "dmod", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmul, 0x0000009c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dmul", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dmfc1, 0x44200000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.d", "!0S,!1S", 4),
- ENCODING_MAP(kMipsFlwc1, 0xC4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ "dmfc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
+ kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
+ "dmtc1", "!0r,!1s", 4),
+ ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll, 0x00000038,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsll32, 0x0000003c,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl, 0x0000003a,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra, 0x0000003b,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsra32, 0x0000003f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
+ "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
+ ENCODING_MAP(kMips64Dsllv, 0x00000014,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsllv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrlv, 0x00000016,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrlv", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsrav, 0x00000017,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsrav", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Dsubu, 0x0000002f,
+ kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
+ "dsubu", "!0r,!1r,!2r", 4),
+ ENCODING_MAP(kMips64Ld, 0xdc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lwc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMipsFldc1, 0xD4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ "ld", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Lwu, 0x9c000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "ldc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMipsFswc1, 0xE4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "swc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMipsFsdc1, 0xF4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
+ "lwu", "!0r,!1d(!2r)", 4),
+ ENCODING_MAP(kMips64Sd, 0xfc000000,
+ kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sdc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMipsMfc1, 0x44000000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mfc1", "!0r,!1s", 4),
- ENCODING_MAP(kMipsMtc1, 0x44800000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
- "mtc1", "!0r,!1s", 4),
- ENCODING_MAP(kMipsMfhc1, 0x44600000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mfhc1", "!0r,!1s", 4),
- ENCODING_MAP(kMipsMthc1, 0x44e00000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
- "mthc1", "!0r,!1s", 4),
- ENCODING_MAP(kMipsDelta, 0x27e00000,
+ "sd", "!0r,!1d(!2r)", 4),
+
+ // The following are pseudoinstructions.
+ ENCODING_MAP(kMipsDelta, 0x27e00000, // It is implemented as daddiu for mips64.
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
NEEDS_FIXUP, "addiu", "!0r,ra,0x!1h(!1d)", 4),
@@ -417,25 +546,6 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
"addiu", "ra,pc,8", 4),
- ENCODING_MAP(kMipsSync, 0x0000000f,
- kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
- "sync", ";", 4),
-
- // The following are mips32r6 instructions.
- ENCODING_MAP(kMipsR6Div, 0x0000009a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMipsR6Mod, 0x000000da,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mod", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMipsR6Mul, 0x00000098,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul", "!0r,!1r,!2r", 4),
-
ENCODING_MAP(kMipsUndefined, 0x64000000,
kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, NO_OPERAND,
@@ -538,14 +648,13 @@ void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
*/
AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
LIR *lir;
- AssemblerStatus res = kSuccess; // Assume success
+ AssemblerStatus res = kSuccess; // Assume success.
for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
continue;
}
-
if (lir->flags.is_nop) {
continue;
}
@@ -567,23 +676,31 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
int delta = offset2 - offset1;
if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
- // Fits
+ // Fits.
lir->operands[1] = delta;
+ if (cu_->target64) {
+ LIR *new_addiu = RawLIR(lir->dalvik_offset, kMips64Daddiu, lir->operands[0], rRAd,
+ delta);
+ InsertLIRBefore(lir, new_addiu);
+ NopLIR(lir);
+ res = kRetryAll;
+ }
} else {
- // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair
- LIR *new_delta_hi =
- RawLIR(lir->dalvik_offset, kMipsDeltaHi,
- lir->operands[0], 0, lir->operands[2],
- lir->operands[3], 0, lir->target);
+ // Doesn't fit - must expand to kMipsDelta[Hi|Lo] pair.
+ LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMipsDeltaHi, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
InsertLIRBefore(lir, new_delta_hi);
- LIR *new_delta_lo =
- RawLIR(lir->dalvik_offset, kMipsDeltaLo,
- lir->operands[0], 0, lir->operands[2],
- lir->operands[3], 0, lir->target);
+ LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMipsDeltaLo, lir->operands[0], 0,
+ lir->operands[2], lir->operands[3], 0, lir->target);
InsertLIRBefore(lir, new_delta_lo);
- LIR *new_addu =
- RawLIR(lir->dalvik_offset, kMipsAddu,
- lir->operands[0], lir->operands[0], rRA);
+ LIR *new_addu;
+ if (cu_->target64) {
+ new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0], lir->operands[0],
+ rRAd);
+ } else {
+ new_addu = RawLIR(lir->dalvik_offset, kMipsAddu, lir->operands[0], lir->operands[0],
+ rRA);
+ }
InsertLIRBefore(lir, new_addu);
NopLIR(lir);
res = kRetryAll;
@@ -698,7 +815,9 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
case kFmtDfp: {
// TODO: do we need to adjust now that we're using 64BitSolo?
DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
- DCHECK_EQ((operand & 0x1), 0U);
+ if (!cu_->target64) {
+ DCHECK_EQ((operand & 0x1), 0U); // May only use even numbered registers for mips32.
+ }
value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
((1 << (encoder->field_loc[i].end + 1)) - 1);
bits |= value;
@@ -719,7 +838,7 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) {
code_buffer_.push_back((bits >> 8) & 0xff);
code_buffer_.push_back((bits >> 16) & 0xff);
code_buffer_.push_back((bits >> 24) & 0xff);
- // TUNING: replace with proper delay slot handling
+ // TUNING: replace with proper delay slot handling.
if (encoder->size == 8) {
DCHECK(!IsPseudoLirOp(lir->opcode));
const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop];
@@ -758,7 +877,7 @@ int MipsMir2Lir::AssignInsnOffsets() {
lir->operands[0] = 0;
}
}
- /* Pseudo opcodes don't consume space */
+ // Pseudo opcodes don't consume space.
}
return offset;
}
@@ -771,10 +890,10 @@ int MipsMir2Lir::AssignInsnOffsets() {
void MipsMir2Lir::AssignOffsets() {
int offset = AssignInsnOffsets();
- /* Const values have to be word aligned */
+ // Const values have to be word aligned.
offset = RoundUp(offset, 4);
- /* Set up offsets for literals */
+ // Set up offsets for literals.
data_offset_ = offset;
offset = AssignLiteralOffset(offset);
@@ -811,19 +930,19 @@ void MipsMir2Lir::AssembleLIR() {
CodegenDump();
LOG(FATAL) << "Assembler error - too many retries";
}
- // Redo offsets and try again
+ // Redo offsets and try again.
AssignOffsets();
code_buffer_.clear();
}
}
- // Install literals
+ // Install literals.
InstallLiteralPools();
- // Install switch tables
+ // Install switch tables.
InstallSwitchTables();
- // Install fill array data
+ // Install fill array data.
InstallFillArrayData();
// Create the mapping table and native offset to reference map.
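For reference, each ENCODING_MAP entry above pairs a bit skeleton with (format, end, start) field triples, and AssembleInstructions shifts every operand to its field's low bit and masks it against the field's high bit. A minimal sketch of that packing, using the kMips64Daddu entry from this hunk (skeleton 0x0000002d; rd in bits 15..11, rs in 25..21, rt in 20..16); the helper names here are illustrative, not the backend's:

    #include <cstdint>

    // Shift an operand into its field (bits end..start) and mask off overflow,
    // as AssembleInstructions does for each field_loc entry.
    uint32_t PackField(uint32_t operand, int end, int start) {
      const uint32_t mask = (end == 31) ? 0xffffffffu : ((1u << (end + 1)) - 1u);
      return (operand << start) & mask;
    }

    // Example: encode "daddu rd,rs,rt" from the table entry above.
    uint32_t EncodeDaddu(uint32_t rd, uint32_t rs, uint32_t rt) {
      uint32_t bits = 0x0000002d;     // Skeleton from ENCODING_MAP.
      bits |= PackField(rd, 15, 11);  // Operand !0r.
      bits |= PackField(rs, 25, 21);  // Operand !1r.
      bits |= PackField(rt, 20, 16);  // Operand !2r.
      return bits;
    }
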
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index b067221c27..de66b35418 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -68,7 +68,7 @@ bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& s
*/
void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later
+ // Add the table to the list - we'll process it later.
SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->switch_mir = mir;
@@ -77,39 +77,39 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
int elements = table[1];
switch_tables_.push_back(tab_rec);
- // The table is composed of 8-byte key/disp pairs
+ // The table is composed of 8-byte key/disp pairs.
int byte_size = elements * 8;
int size_hi = byte_size >> 16;
int size_lo = byte_size & 0xffff;
- RegStorage r_end = AllocTemp();
+ RegStorage r_end = AllocPtrSizeTemp();
if (size_hi) {
NewLIR2(kMipsLui, r_end.GetReg(), size_hi);
}
- // Must prevent code motion for the curr pc pair
+ // Must prevent code motion for the curr pc pair.
GenBarrier(); // Scheduling barrier
- NewLIR0(kMipsCurrPC); // Really a jal to .+8
- // Now, fill the branch delay slot
+ NewLIR0(kMipsCurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot.
if (size_hi) {
NewLIR3(kMipsOri, r_end.GetReg(), r_end.GetReg(), size_lo);
} else {
NewLIR3(kMipsOri, r_end.GetReg(), rZERO, size_lo);
}
- GenBarrier(); // Scheduling barrier
+ GenBarrier(); // Scheduling barrier.
- // Construct BaseLabel and set up table base register
+ // Construct BaseLabel and set up table base register.
LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later
+ // Remember base label so offsets can be computed later.
tab_rec->anchor = base_label;
- RegStorage r_base = AllocTemp();
+ RegStorage r_base = AllocPtrSizeTemp();
NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
OpRegRegReg(kOpAdd, r_end, r_end, r_base);
- // Grab switch test value
+ // Grab switch test value.
rl_src = LoadValue(rl_src, kCoreReg);
- // Test loop
+ // Test loop.
RegStorage r_key = AllocTemp();
LIR* loop_label = NewLIR0(kPseudoTargetLabel);
LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
@@ -118,10 +118,10 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
RegStorage r_disp = AllocTemp();
Load32Disp(r_base, -4, r_disp);
- OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
- OpReg(kOpBx, rs_rRA);
-
- // Loop exit
+ const RegStorage rs_ra = TargetPtrReg(kLr);
+ OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
+ OpReg(kOpBx, rs_ra);
+ // Loop exit.
LIR* exit_label = NewLIR0(kPseudoTargetLabel);
exit_branch->target = exit_label;
}
@@ -141,7 +141,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
*/
void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later
+ // Add the table to the list - we'll process it later.
SwitchTable* tab_rec =
static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->switch_mir = mir;
@@ -150,10 +150,10 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
int size = table[1];
switch_tables_.push_back(tab_rec);
- // Get the switch value
+ // Get the switch value.
rl_src = LoadValue(rl_src, kCoreReg);
- // Prepare the bias. If too big, handle 1st stage here
+ // Prepare the bias. If too big, handle 1st stage here.
int low_key = s4FromSwitchData(&table[2]);
bool large_bias = false;
RegStorage r_key;
@@ -167,10 +167,10 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
r_key = AllocTemp();
}
- // Must prevent code motion for the curr pc pair
+ // Must prevent code motion for the curr pc pair.
GenBarrier();
- NewLIR0(kMipsCurrPC); // Really a jal to .+8
- // Now, fill the branch delay slot with bias strip
+ NewLIR0(kMipsCurrPC); // Really a jal to .+8.
+ // Now, fill the branch delay slot with bias strip.
if (low_key == 0) {
NewLIR0(kMipsNop);
} else {
@@ -180,51 +180,60 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
}
}
- GenBarrier(); // Scheduling barrier
+ GenBarrier(); // Scheduling barrier.
- // Construct BaseLabel and set up table base register
+ // Construct BaseLabel and set up table base register.
LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later
+ // Remember base label so offsets can be computed later.
tab_rec->anchor = base_label;
- // Bounds check - if < 0 or >= size continue following switch
+ // Bounds check - if < 0 or >= size continue following switch.
LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
- // Materialize the table base pointer
- RegStorage r_base = AllocTemp();
+ // Materialize the table base pointer.
+ RegStorage r_base = AllocPtrSizeTemp();
NewLIR4(kMipsDelta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
- // Load the displacement from the switch table
+ // Load the displacement from the switch table.
RegStorage r_disp = AllocTemp();
LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
- // Add to rAP and go
- OpRegRegReg(kOpAdd, rs_rRA, rs_rRA, r_disp);
- OpReg(kOpBx, rs_rRA);
+ // Add to rRA and go.
+ const RegStorage rs_ra = TargetPtrReg(kLr);
+ OpRegRegReg(kOpAdd, rs_ra, rs_ra, r_disp);
+ OpReg(kOpBx, rs_ra);
- /* branch_over target here */
+ // Branch_over target here.
LIR* target = NewLIR0(kPseudoTargetLabel);
branch_over->target = target;
}
void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
+ int ex_offset = cu_->target64 ? Thread::ExceptionOffset<8>().Int32Value() :
+ Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rMIPS_SELF, ex_offset, rl_result.reg, kNotVolatile);
+ LoadRefDisp(TargetPtrReg(kSelf), ex_offset, rl_result.reg, kNotVolatile);
LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rMIPS_SELF, ex_offset, reset_reg, kNotVolatile);
+ StoreRefDisp(TargetPtrReg(kSelf), ex_offset, reset_reg, kNotVolatile);
FreeTemp(reset_reg);
StoreValue(rl_dest, rl_result);
}
void MipsMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
- RegStorage reg_card_base = AllocTemp();
- RegStorage reg_card_no = AllocTemp();
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
- OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
+ RegStorage reg_card_base = AllocPtrSizeTemp();
+ RegStorage reg_card_no = AllocPtrSizeTemp();
+ if (cu_->target64) {
+ // NOTE: native pointer.
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
+ OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+ StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
+ } else {
+ // NOTE: native pointer.
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
+ OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
+ StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
+ }
FreeTemp(reg_card_base);
FreeTemp(reg_card_no);
}
@@ -232,33 +241,57 @@ void MipsMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
int spill_count = num_core_spills_ + num_fp_spills_;
/*
- * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live. Let the register
- * allocation mechanism know so it doesn't try to use any of them when
- * expanding the frame or flushing. This leaves the utility
- * code with a single temp: r12. This should be enough.
+ * On entry, A0, A1, A2 & A3 are live. On Mips64, A4, A5, A6 & A7 are also live.
+ * Let the register allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing.
*/
- LockTemp(rs_rMIPS_ARG0);
- LockTemp(rs_rMIPS_ARG1);
- LockTemp(rs_rMIPS_ARG2);
- LockTemp(rs_rMIPS_ARG3);
+ const RegStorage arg0 = TargetReg(kArg0);
+ const RegStorage arg1 = TargetReg(kArg1);
+ const RegStorage arg2 = TargetReg(kArg2);
+ const RegStorage arg3 = TargetReg(kArg3);
+ const RegStorage arg4 = TargetReg(kArg4);
+ const RegStorage arg5 = TargetReg(kArg5);
+ const RegStorage arg6 = TargetReg(kArg6);
+ const RegStorage arg7 = TargetReg(kArg7);
+
+ LockTemp(arg0);
+ LockTemp(arg1);
+ LockTemp(arg2);
+ LockTemp(arg3);
+ if (cu_->target64) {
+ LockTemp(arg4);
+ LockTemp(arg5);
+ LockTemp(arg6);
+ LockTemp(arg7);
+ }
+
+ bool skip_overflow_check;
+ InstructionSet target = (cu_->target64) ? kMips64 : kMips;
+ int ptr_size = cu_->target64 ? 8 : 4;
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kMips);
+
+ skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, target);
NewLIR0(kPseudoMethodEntry);
- RegStorage check_reg = AllocTemp();
- RegStorage new_sp = AllocTemp();
+ RegStorage check_reg = AllocPtrSizeTemp();
+ RegStorage new_sp = AllocPtrSizeTemp();
+ const RegStorage rs_sp = TargetPtrReg(kSp);
if (!skip_overflow_check) {
- /* Load stack limit */
- Load32Disp(rs_rMIPS_SELF, Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ // Load stack limit.
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::StackEndOffset<8>().Int32Value(), check_reg);
+ } else {
+ Load32Disp(TargetPtrReg(kSelf), Thread::StackEndOffset<4>().Int32Value(), check_reg);
+ }
}
- /* Spill core callee saves */
+ // Spill core callee saves.
SpillCoreRegs();
- /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
DCHECK_EQ(num_fp_spills_, 0);
- const int frame_sub = frame_size_ - spill_count * 4;
+ const int frame_sub = frame_size_ - spill_count * ptr_size;
if (!skip_overflow_check) {
class StackOverflowSlowPath : public LIRSlowPath {
public:
@@ -269,9 +302,9 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- // LR is offset 0 since we push in reverse order.
- m2l_->Load32Disp(rs_rMIPS_SP, 0, rs_rRA);
- m2l_->OpRegImm(kOpAdd, rs_rMIPS_SP, sp_displace_);
+ // RA is offset 0 since we push in reverse order.
+ m2l_->LoadWordDisp(m2l_->TargetPtrReg(kSp), 0, m2l_->TargetPtrReg(kLr));
+ m2l_->OpRegImm(kOpAdd, m2l_->TargetPtrReg(kSp), sp_displace_);
m2l_->ClobberCallerSave();
RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow); // Doesn't clobber LR.
m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
@@ -281,21 +314,27 @@ void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
private:
const size_t sp_displace_;
};
- OpRegRegImm(kOpSub, new_sp, rs_rMIPS_SP, frame_sub);
+ OpRegRegImm(kOpSub, new_sp, rs_sp, frame_sub);
LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 4));
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * ptr_size));
// TODO: avoid copy for small frame sizes.
- OpRegCopy(rs_rMIPS_SP, new_sp); // Establish stack
+ OpRegCopy(rs_sp, new_sp); // Establish stack.
} else {
- OpRegImm(kOpSub, rs_rMIPS_SP, frame_sub);
+ OpRegImm(kOpSub, rs_sp, frame_sub);
}
FlushIns(ArgLocs, rl_method);
- FreeTemp(rs_rMIPS_ARG0);
- FreeTemp(rs_rMIPS_ARG1);
- FreeTemp(rs_rMIPS_ARG2);
- FreeTemp(rs_rMIPS_ARG3);
+ FreeTemp(arg0);
+ FreeTemp(arg1);
+ FreeTemp(arg2);
+ FreeTemp(arg3);
+ if (cu_->target64) {
+ FreeTemp(arg4);
+ FreeTemp(arg5);
+ FreeTemp(arg6);
+ FreeTemp(arg7);
+ }
}
void MipsMir2Lir::GenExitSequence() {
@@ -303,58 +342,67 @@ void MipsMir2Lir::GenExitSequence() {
* In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
* allocated by the register utilities as temps.
*/
- LockTemp(rs_rMIPS_RET0);
- LockTemp(rs_rMIPS_RET1);
+ LockTemp(TargetPtrReg(kRet0));
+ LockTemp(TargetPtrReg(kRet1));
NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
- OpReg(kOpBx, rs_rRA);
+ OpReg(kOpBx, TargetPtrReg(kLr));
}
void MipsMir2Lir::GenSpecialExitSequence() {
- OpReg(kOpBx, rs_rRA);
+ OpReg(kOpBx, TargetPtrReg(kLr));
}
void MipsMir2Lir::GenSpecialEntryForSuspend() {
- // Keep 16-byte stack alignment - push A0, i.e. ArtMethod*, 2 filler words and RA.
- core_spill_mask_ = (1u << rs_rRA.GetRegNum());
+ // Keep 16-byte stack alignment - push A0, i.e. ArtMethod*, 2 filler words and RA for mips32,
+ // but A0 and RA for mips64.
+ core_spill_mask_ = (1u << TargetPtrReg(kLr).GetRegNum());
num_core_spills_ = 1u;
fp_spill_mask_ = 0u;
num_fp_spills_ = 0u;
frame_size_ = 16u;
core_vmap_table_.clear();
fp_vmap_table_.clear();
- OpRegImm(kOpSub, rs_rMIPS_SP, frame_size_);
- Store32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
- Store32Disp(rs_rMIPS_SP, 0, rs_rA0);
+ const RegStorage rs_sp = TargetPtrReg(kSp);
+ OpRegImm(kOpSub, rs_sp, frame_size_);
+ StoreWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
+ StoreWordDisp(rs_sp, 0, TargetPtrReg(kArg0));
}
void MipsMir2Lir::GenSpecialExitForSuspend() {
// Pop the frame. Don't pop ArtMethod*, it's no longer needed.
- Load32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
- OpRegImm(kOpAdd, rs_rMIPS_SP, frame_size_);
+ const RegStorage rs_sp = TargetPtrReg(kSp);
+ LoadWordDisp(rs_sp, frame_size_ - (cu_->target64 ? 8 : 4), TargetPtrReg(kLr));
+ OpRegImm(kOpAdd, rs_sp, frame_size_);
}
/*
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
- int state, const MethodReference& target_method,
- uint32_t,
- uintptr_t direct_code, uintptr_t direct_method,
- InvokeType type) {
+static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
+ const MethodReference& target_method, uint32_t, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_code != 0 && direct_method != 0) {
switch (state) {
case 0: // Get the current Method* [sets kArg0]
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ if (cu->target64) {
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ }
} else {
cg->LoadCodeAddress(target_method, type, kInvokeTgt);
}
if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ if (cu->target64) {
+ cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
+ } else {
+ cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
+ }
} else {
cg->LoadMethodAddress(target_method, type, kArg0);
}
@@ -377,7 +425,11 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
// Set up direct code if known.
if (direct_code != 0) {
if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ if (cu->target64) {
+ cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ } else {
+ cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
+ }
} else {
CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
cg->LoadCodeAddress(target_method, type, kInvokeTgt);
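The UnconditionallyMarkGCCard change above keeps the same card math on both targets: the written-to address is shifted right by the card-size shift to index the card table, and the byte stored is the low byte of the card-table base register itself (that is what StoreBaseIndexed with reg_card_base as both base and source, size kUnsignedByte, produces). A sketch of the computation under assumed card geometry — the real constant comes from gc::accounting::CardTable:

    #include <cstdint>

    // Assumed for the sketch: 128-byte cards (ART derives this from CardTable).
    constexpr int kCardShift = 7;

    void MarkGCCard(uint8_t* biased_card_table, uintptr_t tgt_addr) {
      const uintptr_t card_no = tgt_addr >> kCardShift;  // OpRegRegImm(kOpLsr, ...).
      // Store the base's low byte as the dirty value, matching StoreBaseIndexed
      // with the card-table base register as the source operand.
      biased_card_table[card_no] =
          static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_card_table));
    }
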
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 649b6c9417..713264e0d9 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
#define ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
+#include "dex/compiler_ir.h"
#include "dex/quick/mir_to_lir.h"
#include "mips_lir.h"
@@ -39,215 +40,303 @@ class MipsMir2Lir FINAL : public Mir2Lir {
size_t cur_core_reg_;
};
+ class InToRegStorageMips64Mapper : public InToRegStorageMapper {
+ public:
+ explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
+ virtual RegStorage GetNextReg(ShortyArg arg);
+ virtual void Reset() OVERRIDE {
+ cur_arg_reg_ = 0;
+ }
+ protected:
+ Mir2Lir* m2l_;
+ private:
+ size_t cur_arg_reg_;
+ };
+
+ InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
InToRegStorageMipsMapper in_to_reg_storage_mips_mapper_;
InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
- in_to_reg_storage_mips_mapper_.Reset();
- return &in_to_reg_storage_mips_mapper_;
+ InToRegStorageMapper* res;
+ if (cu_->target64) {
+ res = &in_to_reg_storage_mips64_mapper_;
+ } else {
+ res = &in_to_reg_storage_mips_mapper_;
+ }
+ res->Reset();
+ return res;
}
- public:
- MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
- // Required for target - codegen utilities.
- bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
- bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) OVERRIDE;
- void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) OVERRIDE;
- LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
- LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
- OpSize size) OVERRIDE;
- LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
- LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
- OpSize size) OVERRIDE;
- LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
- LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
-
- /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
- void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
-
- // Required for target - register utilities.
- RegStorage Solo64ToPair64(RegStorage reg);
- RegStorage Fp64ToSolo32(RegStorage reg);
- RegStorage TargetReg(SpecialTargetRegister reg);
- RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE;
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
- ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
- void AdjustSpillMask();
- void ClobberCallerSave();
- void FreeCallTemps();
- void LockCallTemps();
- void CompilerInitializeRegAlloc();
-
- // Required for target - miscellaneous.
- void AssembleLIR();
- int AssignInsnOffsets();
- void AssignOffsets();
- AssemblerStatus AssembleInstructions(CodeOffset start_addr);
- void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
- void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
- ResourceMask* use_mask, ResourceMask* def_mask) OVERRIDE;
- const char* GetTargetInstFmt(int opcode);
- const char* GetTargetInstName(int opcode);
- std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- ResourceMask GetPCUseDefEncoding() const OVERRIDE;
- uint64_t GetTargetInstFlags(int opcode);
- size_t GetInsnSize(LIR* lir) OVERRIDE;
- bool IsUnconditionalBranch(LIR* lir);
-
- // Get the register class for load/store of a field.
- RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
-
- // Required for target - Dalvik-level generators.
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, int flags);
- void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale);
- void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_shift, int flags);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
- bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
- bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
- bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
- bool GenInlinedSqrt(CallInfo* info);
- bool GenInlinedPeek(CallInfo* info, OpSize size);
- bool GenInlinedPoke(CallInfo* info, OpSize size);
- void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, int flags) OVERRIDE;
- RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
- void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheckWide(RegStorage reg);
- void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- void GenExitSequence();
- void GenSpecialExitSequence() OVERRIDE;
- void GenSpecialEntryForSuspend() OVERRIDE;
- void GenSpecialExitForSuspend() OVERRIDE;
- void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- void GenSelect(BasicBlock* bb, MIR* mir);
- void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
- int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) OVERRIDE;
- bool GenMemBarrier(MemBarrierKind barrier_kind);
- void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
- void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
-
- // Required for target - single operation generators.
- LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
- LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
- LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpIT(ConditionCode cond, const char* guide);
- void OpEndIT(LIR* it);
- LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- void OpPcRelLoad(RegStorage reg, LIR* target);
- LIR* OpReg(OpKind op, RegStorage r_dest_src);
- void OpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
- LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
- LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
- LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
- LIR* OpTestSuspend(LIR* target);
- LIR* OpVldm(RegStorage r_base, int count);
- LIR* OpVstm(RegStorage r_base, int count);
- void OpRegCopyWide(RegStorage dest, RegStorage src);
-
- // TODO: collapse r_dest.
- LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size);
- // TODO: collapse r_src.
- LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size);
- void SpillCoreRegs();
- void UnSpillCoreRegs();
- static const MipsEncodingMap EncodingMap[kMipsLast];
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
-
- bool WideGPRsAreAliases() const OVERRIDE {
- return false; // Wide GPRs are formed by pairing.
- }
- bool WideFPRsAreAliases() const OVERRIDE {
- return false; // Wide FPRs are formed by pairing.
- }
+ public:
+ MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
- LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+ // Required for target - codegen utilities.
+ bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
+ RegLocation rl_dest, int lit);
+ bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
+ void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
+ OVERRIDE;
+ void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
+ OVERRIDE;
+ LIR* CheckSuspendUsingLoad() OVERRIDE;
+ RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
+ LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
+ OpSize size) OVERRIDE;
+ LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
+ LIR* LoadConstantWideNoClobber(RegStorage r_dest, int64_t value);
+ LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
+ LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) OVERRIDE;
+ LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
+ OpSize size) OVERRIDE;
+ LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
+ LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) OVERRIDE;
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
- OVERRIDE;
+ /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
+ void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
- NextCallInsn GetNextSDCallInsn() OVERRIDE;
- LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+ // Required for target - register utilities.
+ RegStorage Solo64ToPair64(RegStorage reg);
+ RegStorage Fp64ToSolo32(RegStorage reg);
+ RegStorage TargetReg(SpecialTargetRegister reg);
+ RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE;
+ RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
+ return TargetReg(reg, cu_->target64 ? kWide : kNotWide);
+ }
+ RegLocation GetReturnAlt();
+ RegLocation GetReturnWideAlt();
+ RegLocation LocCReturn();
+ RegLocation LocCReturnRef();
+ RegLocation LocCReturnDouble();
+ RegLocation LocCReturnFloat();
+ RegLocation LocCReturnWide();
+ ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
+ void AdjustSpillMask();
+ void ClobberCallerSave();
+ void FreeCallTemps();
+ void LockCallTemps();
+ void CompilerInitializeRegAlloc();
- // Unimplemented intrinsics.
- bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
- OVERRIDE {
- return false;
- }
+ // Required for target - miscellaneous.
+ void AssembleLIR();
+ int AssignInsnOffsets();
+ void AssignOffsets();
+ AssemblerStatus AssembleInstructions(CodeOffset start_addr);
+ void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
+ void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) OVERRIDE;
+ const char* GetTargetInstFmt(int opcode);
+ const char* GetTargetInstName(int opcode);
+ std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ ResourceMask GetPCUseDefEncoding() const OVERRIDE;
+ uint64_t GetTargetInstFlags(int opcode);
+ size_t GetInsnSize(LIR* lir) OVERRIDE;
+ bool IsUnconditionalBranch(LIR* lir);
+
+ // Get the register class for load/store of a field.
+ RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
+
+ // Required for target - Dalvik-level generators.
+ void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation lr_shift);
+ void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags);
+ void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_dest, int scale);
+ void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
+ RegLocation rl_src, int scale, bool card_mark);
+ void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift, int flags);
+ void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+ bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
+ bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
+ bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
+ bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
+ bool GenInlinedSqrt(CallInfo* info);
+ bool GenInlinedPeek(CallInfo* info, OpSize size);
+ bool GenInlinedPoke(CallInfo* info, OpSize size);
+ void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
+ void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, int flags) OVERRIDE;
+ RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
+ void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivZeroCheckWide(RegStorage reg);
+ void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+ void GenExitSequence();
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
+ void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+ void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+ void GenSelect(BasicBlock* bb, MIR* mir);
+ void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
+ int32_t true_val, int32_t false_val, RegStorage rs_dest,
+ RegisterClass dest_reg_class) OVERRIDE;
+ bool GenMemBarrier(MemBarrierKind barrier_kind);
+ void GenMoveException(RegLocation rl_dest);
+ void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
+ int first_bit, int second_bit);
+ void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+ void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+ void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+ bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
- // True if isa is rev R6.
- const bool isaIsR6_;
+ // Required for target - single operation generators.
+ LIR* OpUnconditionalBranch(LIR* target);
+ LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
+ LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
+ LIR* OpCondBranch(ConditionCode cc, LIR* target);
+ LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
+ LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpIT(ConditionCode cond, const char* guide);
+ void OpEndIT(LIR* it);
+ LIR* OpMem(OpKind op, RegStorage r_base, int disp);
+ void OpPcRelLoad(RegStorage reg, LIR* target);
+ LIR* OpReg(OpKind op, RegStorage r_dest_src);
+ void OpRegCopy(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
+ LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
+ LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
+ LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
+ LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
+ LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
+ LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
+ LIR* OpTestSuspend(LIR* target);
+ LIR* OpVldm(RegStorage r_base, int count);
+ LIR* OpVstm(RegStorage r_base, int count);
+ void OpRegCopyWide(RegStorage dest, RegStorage src);
- // True if floating point unit is 32bits.
- const bool fpuIs32Bit_;
+ // TODO: collapse r_dest.
+ LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
+ // TODO: collapse r_src.
+ LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
+ void SpillCoreRegs();
+ void UnSpillCoreRegs();
+ static const MipsEncodingMap EncodingMap[kMipsLast];
+ bool InexpensiveConstantInt(int32_t value);
+ bool InexpensiveConstantFloat(int32_t value);
+ bool InexpensiveConstantLong(int64_t value);
+ bool InexpensiveConstantDouble(int64_t value);
- private:
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
+ bool WideGPRsAreAliases() const OVERRIDE {
+ return cu_->target64; // Wide GPRs are formed by pairing on mips32.
+ }
+ bool WideFPRsAreAliases() const OVERRIDE {
+ return cu_->target64; // Wide FPRs are formed by pairing on mips32.
+ }
- void ConvertShortToLongBranch(LIR* lir);
+ LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
+
+ RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2, bool is_div,
+ int flags) OVERRIDE;
+ RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
+ NextCallInsn GetNextSDCallInsn() OVERRIDE;
+ LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
+
+ // Unimplemented intrinsics.
+ bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
+ return false;
+ }
+ bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
+ OVERRIDE {
+ return false;
+ }
+
+ // True if isa is rev R6.
+ const bool isaIsR6_;
+
+ // True if floating point unit is 32bits.
+ const bool fpuIs32Bit_;
+
+ private:
+ void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+
+ void ConvertShortToLongBranch(LIR* lir);
+
+ // Mips64 specific long gen methods:
+ void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
+ void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+ void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div, int flags);
+ void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
+ RegisterClass reg_class);
+ RegStorage AllocPtrSizeTemp(bool required = true);
+
+ /**
+ * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
+ * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
+ * @see As64BitReg
+ */
+ RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register";
+ } else {
+ LOG(WARNING) << "Expected 64b register";
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ /**
+ * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
+ * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
+ */
+ RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register";
+ } else {
+ LOG(WARNING) << "Expected 32b register";
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ RegStorage Check64BitReg(RegStorage reg) {
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Checked for 64b register";
+ } else {
+ LOG(WARNING) << "Checked for 64b register";
+ return As64BitReg(reg);
+ }
+ }
+ return reg;
+ }
};
} // namespace art
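
The As32BitReg()/As64BitReg() helpers added to the class above convert between register views by keeping the register number and swapping only the shape bits. A minimal self-contained sketch of that idea, with illustrative constants that merely stand in for ART's actual RegStorage layout:

    #include <cassert>
    #include <cstdint>

    namespace sketch {

    // Illustrative shape encodings; the real RegStorage uses its own layout.
    constexpr uint16_t k32BitSolo  = 0x1000;
    constexpr uint16_t k64BitSolo  = 0x2000;
    constexpr uint16_t kShapeMask  = 0xf000;
    constexpr uint16_t kRegNumMask = 0x0fff;  // stands in for kRegTypeMask

    struct Reg {
      uint16_t bits;
      bool Is64Bit() const { return (bits & kShapeMask) == k64BitSolo; }
      uint16_t Num() const { return bits & kRegNumMask; }
    };

    // 64-bit view -> 32-bit view: same register number, new shape bits.
    inline Reg As32BitReg(Reg r) {
      assert(r.Is64Bit());
      return Reg{static_cast<uint16_t>(k32BitSolo | r.Num())};
    }

    // 32-bit view -> 64-bit view.
    inline Reg As64BitReg(Reg r) {
      assert(!r.Is64Bit());
      return Reg{static_cast<uint16_t>(k64BitSolo | r.Num())};
    }

    }  // namespace sketch

The DCHECKs in the real helpers then verify that the recomputed view matches what the register-info table already knows about that physical register.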
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 37bf1a6b9a..45fd1a9433 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -23,8 +23,8 @@
namespace art {
-void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
int op = kMipsNop;
RegLocation rl_result;
@@ -51,7 +51,7 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
break;
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
- FlushAllRegs(); // Send everything to home location
+ FlushAllRegs(); // Send everything to home location.
CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
rl_result = GetReturn(kFPReg);
StoreValue(rl_dest, rl_result);
@@ -69,8 +69,8 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode,
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
int op = kMipsNop;
RegLocation rl_result;
@@ -93,7 +93,7 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode,
break;
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
- FlushAllRegs(); // Send everything to home location
+ FlushAllRegs(); // Send everything to home location.
CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
rl_result = GetReturnWide(kFPReg);
StoreValueWide(rl_dest, rl_result);
@@ -147,22 +147,22 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
op = kMipsFcvtdw;
break;
case Instruction::FLOAT_TO_INT:
- GenConversionCall(kQuickF2iz, rl_dest, rl_src);
+ GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
return;
case Instruction::DOUBLE_TO_INT:
- GenConversionCall(kQuickD2iz, rl_dest, rl_src);
+ GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
return;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(kQuickL2d, rl_dest, rl_src);
+ GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(kQuickF2l, rl_dest, rl_src);
+ GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(kQuickL2f, rl_dest, rl_src);
+ GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(kQuickD2l, rl_dest, rl_src);
+ GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -189,24 +189,24 @@ static RegStorage GetWideArgFP(bool fpuIs32Bit, size_t base) {
if (fpuIs32Bit) {
switch (base) {
case 0:
- return RegStorage(RegStorage::k64BitPair, rMIPS_FARG0, rMIPS_FARG1);
+ return RegStorage(RegStorage::k64BitPair, rFARG0, rFARG1);
case 2:
- return RegStorage(RegStorage::k64BitPair, rMIPS_FARG2, rMIPS_FARG3);
+ return RegStorage(RegStorage::k64BitPair, rFARG2, rFARG3);
}
} else {
switch (base) {
case 0:
- return RegStorage(RegStorage::k64BitSolo, rMIPS_FARG0);
+ return RegStorage(RegStorage::k64BitSolo, rFARG0);
case 2:
- return RegStorage(RegStorage::k64BitSolo, rMIPS_FARG2);
+ return RegStorage(RegStorage::k64BitSolo, rFARG2);
}
}
LOG(FATAL) << "Unsupported Mips.GetWideFP: " << fpuIs32Bit << " " << base;
UNREACHABLE();
}
-void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
+void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
bool wide = true;
QuickEntrypointEnum target;
@@ -232,16 +232,23 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
FlushAllRegs();
LockCallTemps();
if (wide) {
- RegStorage r_tmp1 = GetWideArgFP(fpuIs32Bit_, 0);
- RegStorage r_tmp2 = GetWideArgFP(fpuIs32Bit_, 2);
+ RegStorage r_tmp1;
+ RegStorage r_tmp2;
+ if (cu_->target64) {
+ r_tmp1 = RegStorage(RegStorage::k64BitSolo, rFARG0);
+ r_tmp2 = RegStorage(RegStorage::k64BitSolo, rFARG1);
+ } else {
+ r_tmp1 = GetWideArgFP(fpuIs32Bit_, 0);
+ r_tmp2 = GetWideArgFP(fpuIs32Bit_, 2);
+ }
LoadValueDirectWideFixed(rl_src1, r_tmp1);
LoadValueDirectWideFixed(rl_src2, r_tmp2);
} else {
- LoadValueDirectFixed(rl_src1, rs_rMIPS_FARG0);
- LoadValueDirectFixed(rl_src2, rs_rMIPS_FARG2);
+ LoadValueDirectFixed(rl_src1, rs_rFARG0);
+ LoadValueDirectFixed(rl_src2, cu_->target64 ? rs_rFARG1 : rs_rFARG2);
}
RegStorage r_tgt = LoadHelper(target);
- // NOTE: not a safepoint
+ // NOTE: not a safepoint.
OpReg(kOpBlx, r_tgt);
RegLocation rl_result = GetReturn(kCoreReg);
StoreValue(rl_dest, rl_result);
@@ -254,18 +261,30 @@ void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bo
void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
- rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
+ if (cu_->target64) {
+ rl_src = LoadValue(rl_src, kFPReg);
+ rl_result = EvalLoc(rl_dest, kFPReg, true);
+ NewLIR2(kMipsFnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ rl_src = LoadValue(rl_src, kCoreReg);
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ OpRegRegImm(kOpAdd, rl_result.reg, rl_src.reg, 0x80000000);
+ }
StoreValue(rl_dest, rl_result);
}
void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
- rl_src = LoadValueWide(rl_src, kCoreReg);
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
- OpRegCopy(rl_result.reg, rl_src.reg);
+ if (cu_->target64) {
+ rl_src = LoadValueWide(rl_src, kFPReg);
+ rl_result = EvalLocWide(rl_dest, kFPReg, true);
+ NewLIR2(kMipsFnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ OpRegRegImm(kOpAdd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x80000000);
+ OpRegCopy(rl_result.reg, rl_src.reg);
+ }
StoreValueWide(rl_dest, rl_result);
}
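
The mips32 fallback paths kept above negate floats without touching the FPU: adding 0x80000000 to the word holding the value (or its high half, for doubles) flips only the IEEE-754 sign bit. A hedged C++ model of why that addition works; the function name is mine:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Adding 1 << 31 (mod 2^32) leaves the low 31 bits untouched and toggles
    // the top bit, i.e. it equals XOR with 0x80000000: an IEEE-754 sign flip.
    static float NegateViaSignBit(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      bits += 0x80000000u;
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }

    int main() {
      std::printf("%f %f\n", NegateViaSignBit(3.5f), NegateViaSignBit(-0.25f));
      return 0;  // prints: -3.500000 0.250000
    }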
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 8093c9772c..626b36ea28 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -34,6 +34,7 @@ namespace art {
* x < y return -1
* x > y return 1
*
+ * Mips32 implementation
* slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
 * sgt t1, x.hi, y.hi; # (x.hi > y.hi) ? 1:0
 * subu res, t1, t0 # res = -1:1:0 for [ < > = ]
@@ -43,26 +44,40 @@ namespace art {
 * subu res, t1, t0
* finish:
*
+ * Mips64 implementation
+ * slt temp, x, y; # (x < y) ? 1:0
+ * slt res, y, x; # (x > y) ? 1:0
+ * subu res, res, temp; # res = -1:1:0 for [ < > = ]
+ *
*/
-void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
+void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegStorage t0 = AllocTemp();
- RegStorage t1 = AllocTemp();
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
- NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
- NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
- NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
- NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
- FreeTemp(t0);
- FreeTemp(t1);
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch->target = target;
- StoreValue(rl_dest, rl_result);
+ if (cu_->target64) {
+ RegStorage temp = AllocTempWide();
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(kMipsSlt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(kMipsSlt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
+ FreeTemp(temp);
+ StoreValue(rl_dest, rl_result);
+ } else {
+ RegStorage t0 = AllocTemp();
+ RegStorage t1 = AllocTemp();
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ NewLIR3(kMipsSlt, t0.GetReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ NewLIR3(kMipsSlt, t1.GetReg(), rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
+ NewLIR3(kMipsSltu, t0.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ NewLIR3(kMipsSltu, t1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetLowReg());
+ NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1.GetReg(), t0.GetReg());
+ FreeTemp(t0);
+ FreeTemp(t1);
+ LIR* target = NewLIR0(kPseudoTargetLabel);
+ branch->target = target;
+ StoreValue(rl_dest, rl_result);
+ }
}
LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
@@ -134,7 +149,7 @@ LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage sr
LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
LIR* branch;
if (check_value != 0) {
- // TUNING: handle s16 & kCondLt/Mi case using slti
+ // TUNING: handle s16 & kCondLt/Mi case using slti.
RegStorage t_reg = AllocTemp();
LoadConstant(t_reg, check_value);
branch = OpCmpBranch(cond, reg, t_reg, target);
@@ -164,17 +179,34 @@ LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_v
}
LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
- // If src or dest is a pair, we'll be using low reg.
- if (r_dest.IsPair()) {
- r_dest = r_dest.GetLow();
- }
- if (r_src.IsPair()) {
- r_src = r_src.GetLow();
+ LIR* res;
+ MipsOpCode opcode;
+
+ if (!cu_->target64) {
+ // If src or dest is a pair, we'll be using low reg.
+ if (r_dest.IsPair()) {
+ r_dest = r_dest.GetLow();
+ }
+ if (r_src.IsPair()) {
+ r_src = r_src.GetLow();
+ }
+ } else {
+ DCHECK(!r_dest.IsPair() && !r_src.IsPair());
}
+
if (r_dest.IsFloat() || r_src.IsFloat())
return OpFpRegCopy(r_dest, r_src);
- LIR* res = RawLIR(current_dalvik_offset_, kMipsMove,
- r_dest.GetReg(), r_src.GetReg());
+ if (cu_->target64) {
+    // TODO: Check that r_src and r_dest are both 32-bit or both 64-bit on Mips64.
+ if (r_dest.Is64Bit() || r_src.Is64Bit()) {
+ opcode = kMipsMove;
+ } else {
+ opcode = kMipsSll;
+ }
+ } else {
+ opcode = kMipsMove;
+ }
+ res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
@@ -189,6 +221,10 @@ void MipsMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
+ if (cu_->target64) {
+ OpRegCopy(r_dest, r_src);
+ return;
+ }
if (r_dest != r_src) {
bool dest_fp = r_dest.IsFloat();
bool src_fp = r_src.IsFloat();
@@ -213,16 +249,16 @@ void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
if (src_fp) {
// Here if dest is core reg and src is fp reg.
if (fpuIs32Bit_) {
- NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
- NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
+ NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetLowReg());
+ NewLIR2(kMipsMfc1, r_dest.GetHighReg(), r_src.GetHighReg());
} else {
- r_src = Fp64ToSolo32(r_src);
- NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetReg());
- NewLIR2(kMipsMfhc1, r_dest.GetHighReg(), r_src.GetReg());
+ r_src = Fp64ToSolo32(r_src);
+ NewLIR2(kMipsMfc1, r_dest.GetLowReg(), r_src.GetReg());
+ NewLIR2(kMipsMfhc1, r_dest.GetHighReg(), r_src.GetReg());
}
} else {
// Here if both src and dest are core registers.
- // Handle overlap
+ // Handle overlap.
if (r_src.GetHighReg() == r_dest.GetLowReg()) {
OpRegCopy(r_dest.GetHigh(), r_src.GetHigh());
OpRegCopy(r_dest.GetLow(), r_src.GetLow());
@@ -263,17 +299,15 @@ RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStor
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (isaIsR6_) {
- NewLIR3(is_div ? kMipsR6Div : kMipsR6Mod,
- rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
+ NewLIR3(is_div ? kMipsR6Div : kMipsR6Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
} else {
- NewLIR2(kMipsDiv, reg1.GetReg(), reg2.GetReg());
- NewLIR1(is_div ? kMipsMflo : kMipsMfhi, rl_result.reg.GetReg());
+ NewLIR2(kMipsR2Div, reg1.GetReg(), reg2.GetReg());
+ NewLIR1(is_div ? kMipsR2Mflo : kMipsR2Mfhi, rl_result.reg.GetReg());
}
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
- bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
RegStorage t_reg = AllocTemp();
NewLIR3(kMipsAddiu, t_reg.GetReg(), rZERO, lit);
RegLocation rl_result = GenDivRem(rl_dest, reg1, t_reg, is_div);
@@ -322,10 +356,17 @@ bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
// MIPS supports only aligned access. Defer unaligned access to JNI implementation.
return false;
}
- RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ if (!cu_->target64) {
+ rl_src_address = NarrowRegLoc(rl_src_address); // Ignore high half in info->args[1].
+ }
RegLocation rl_dest = InlineTarget(info);
- RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+ RegLocation rl_address;
+ if (cu_->target64) {
+ rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ } else {
+ rl_address = LoadValue(rl_src_address, kCoreReg);
+ }
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
@@ -338,10 +379,17 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
// MIPS supports only aligned access. Defer unaligned access to JNI implementation.
return false;
}
- RegLocation rl_src_address = info->args[0]; // long address
- rl_src_address = NarrowRegLoc(rl_src_address); // ignore high half in info->args[1]
- RegLocation rl_src_value = info->args[2]; // [size] value
- RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
+ RegLocation rl_src_address = info->args[0]; // Long address.
+ if (!cu_->target64) {
+ rl_src_address = NarrowRegLoc(rl_src_address); // Ignore high half in info->args[1].
+ }
+ RegLocation rl_src_value = info->args[2]; // [size] value.
+ RegLocation rl_address;
+ if (cu_->target64) {
+ rl_address = LoadValueWide(rl_src_address, kCoreReg);
+ } else {
+ rl_address = LoadValue(rl_src_address, kCoreReg);
+ }
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
@@ -366,8 +414,7 @@ LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
UNREACHABLE();
}
-void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
int first_bit, int second_bit) {
UNUSED(lit);
RegStorage t_reg = AllocTemp();
@@ -380,20 +427,24 @@ void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
}
void MipsMir2Lir::GenDivZeroCheckWide(RegStorage reg) {
- DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
- RegStorage t_reg = AllocTemp();
- OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
- GenDivZeroCheck(t_reg);
- FreeTemp(t_reg);
+ if (cu_->target64) {
+ GenDivZeroCheck(reg);
+ } else {
+ DCHECK(reg.IsPair()); // TODO: support k64BitSolo.
+ RegStorage t_reg = AllocTemp();
+ OpRegRegReg(kOpOr, t_reg, reg.GetLow(), reg.GetHigh());
+ GenDivZeroCheck(t_reg);
+ FreeTemp(t_reg);
+ }
}
-// Test suspend flag, return target of taken suspend branch
+// Test suspend flag, return target of taken suspend branch.
LIR* MipsMir2Lir::OpTestSuspend(LIR* target) {
- OpRegImm(kOpSub, rs_rMIPS_SUSPEND, 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS_SUSPEND, 0, target);
+ OpRegImm(kOpSub, TargetPtrReg(kSuspend), 1);
+ return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, TargetPtrReg(kSuspend), 0, target);
}
-// Decrement register and branch on condition
+// Decrement register and branch on condition.
LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
OpRegImm(kOpSub, reg, 1);
return OpCmpImmBranch(c_code, reg, 0, target);
@@ -423,9 +474,7 @@ void MipsMir2Lir::OpEndIT(LIR* it) {
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
-void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- UNUSED(opcode);
+void MipsMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -440,15 +489,14 @@ void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src2.reg.GetLow(), rl_src1.reg.GetLow());
RegStorage t_reg = AllocTemp();
OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHigh(), rl_src1.reg.GetHigh());
- NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetLowReg(),
+ rl_src2.reg.GetLowReg());
OpRegRegReg(kOpAdd, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- UNUSED(opcode);
+void MipsMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -471,47 +519,136 @@ void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
void MipsMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, int flags) {
- switch (opcode) {
- case Instruction::ADD_LONG:
- case Instruction::ADD_LONG_2ADDR:
- GenAddLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::SUB_LONG:
- case Instruction::SUB_LONG_2ADDR:
- GenSubLong(opcode, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::NEG_LONG:
- GenNegLong(rl_dest, rl_src2);
- return;
-
- default:
- break;
+ if (cu_->target64) {
+ switch (opcode) {
+ case Instruction::NOT_LONG:
+ GenNotLong(rl_dest, rl_src2);
+ return;
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::MUL_LONG:
+ case Instruction::MUL_LONG_2ADDR:
+ GenMulLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::DIV_LONG:
+ case Instruction::DIV_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
+ return;
+ case Instruction::REM_LONG:
+ case Instruction::REM_LONG_2ADDR:
+ GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
+ return;
+ case Instruction::AND_LONG:
+ case Instruction::AND_LONG_2ADDR:
+ GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::OR_LONG:
+ case Instruction::OR_LONG_2ADDR:
+ GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::XOR_LONG:
+ case Instruction::XOR_LONG_2ADDR:
+ GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
+
+ default:
+ LOG(FATAL) << "Invalid long arith op";
+ return;
+ }
+ } else {
+ switch (opcode) {
+ case Instruction::ADD_LONG:
+ case Instruction::ADD_LONG_2ADDR:
+ GenAddLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::SUB_LONG:
+ case Instruction::SUB_LONG_2ADDR:
+ GenSubLong(rl_dest, rl_src1, rl_src2);
+ return;
+ case Instruction::NEG_LONG:
+ GenNegLong(rl_dest, rl_src2);
+ return;
+ default:
+ break;
+ }
+ // Fallback for all other ops.
+ Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
+}
- // Fallback for all other ops.
- Mir2Lir::GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
+void MipsMir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
+ StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
+void MipsMir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- /*
- * [v1 v0] = -[a1 a0]
- * negu v0,a0
- * negu v1,a1
- * sltu t1,r_zero
- * subu v1,v1,t1
- */
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+}
- OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
- OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
- RegStorage t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
- FreeTemp(t_reg);
+void MipsMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ StoreValueWide(rl_dest, rl_result);
+}
+
+void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(opcode);
+ // TODO: Implement easy div/rem?
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+ if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+ GenDivZeroCheckWide(rl_src2.reg);
+ }
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
+ rl_src2.reg.GetReg());
StoreValueWide(rl_dest, rl_result);
}
+void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
+ rl_src = LoadValueWide(rl_src, kCoreReg);
+ RegLocation rl_result;
+
+ if (cu_->target64) {
+ rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ // [v1 v0] = -[a1 a0]
+ // negu v0,a0
+ // negu v1,a1
+    // sltu t1,r_zero,v0
+ // subu v1,v1,t1
+ OpRegReg(kOpNeg, rl_result.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegReg(kOpNeg, rl_result.reg.GetHigh(), rl_src.reg.GetHigh());
+ RegStorage t_reg = AllocTemp();
+ NewLIR3(kMipsSltu, t_reg.GetReg(), rZERO, rl_result.reg.GetLowReg());
+ OpRegRegReg(kOpSub, rl_result.reg.GetHigh(), rl_result.reg.GetHigh(), t_reg);
+ FreeTemp(t_reg);
+ StoreValueWide(rl_dest, rl_result);
+ }
+}
+
/*
* Generate array load
*/
@@ -532,18 +669,18 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
}
- /* null object? */
+ // Null object?
GenNullCheck(rl_array.reg, opt_flags);
- RegStorage reg_ptr = AllocTemp();
+ RegStorage reg_ptr = (cu_->target64) ? AllocTempRef() : AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
RegStorage reg_len;
if (needs_range_check) {
reg_len = AllocTemp();
- /* Get len */
+ // Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
}
- /* reg_ptr -> array data */
+ // reg_ptr -> array data.
OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
FreeTemp(rl_array.reg);
if ((size == k64) || (size == kDouble)) {
@@ -573,7 +710,17 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
+
+ if (cu_->target64) {
+ if (rl_result.ref) {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
+ kReference);
+ } else {
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
+ }
+ } else {
+ LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
+ }
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -612,7 +759,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
allocated_reg_ptr_temp = true;
}
- /* null object? */
+ // Null object?
GenNullCheck(rl_array.reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -620,14 +767,14 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
if (needs_range_check) {
reg_len = AllocTemp();
// NOTE: max live temps(4) here.
- /* Get len */
+ // Get len.
Load32Disp(rl_array.reg, len_offset, reg_len);
}
- /* reg_ptr -> array data */
+ // reg_ptr -> array data.
OpRegImm(kOpAdd, reg_ptr, data_offset);
- /* at this point, reg_ptr points to array, 2 live temps */
+ // At this point, reg_ptr points to array, 2 live temps.
if ((size == k64) || (size == kDouble)) {
- // TUNING: specific wide routine that can handle fp regs
+ // TUNING: specific wide routine that can handle fp regs.
if (scale) {
RegStorage r_new_index = AllocTemp();
OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
@@ -660,18 +807,104 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
}
}
+void MipsMir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_shift) {
+ if (!cu_->target64) {
+ Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ return;
+ }
+ OpKind op = kOpBkpt;
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case: " << opcode;
+ }
+ rl_shift = LoadValue(rl_shift, kCoreReg);
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
+ StoreValueWide(rl_dest, rl_result);
+}
+
void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift, int flags) {
UNUSED(flags);
- // Default implementation is just to ignore the constant case.
- GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ if (!cu_->target64) {
+ // Default implementation is just to ignore the constant case.
+ GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ return;
+ }
+ OpKind op = kOpBkpt;
+ // Per spec, we only care about low 6 bits of shift amount.
+ int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ if (shift_amount == 0) {
+ StoreValueWide(rl_dest, rl_src1);
+ return;
+ }
+
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ op = kOpLsl;
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ op = kOpAsr;
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ op = kOpLsr;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case";
+ }
+ OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
+ StoreValueWide(rl_dest, rl_result);
}
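
The 0x3f mask above exists because 64-bit shifts only consume the low six bits of the shift amount; masking first also keeps the host-side constant handling well defined. A quick check of the equivalence, with made-up example values:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t v = 0x0123456789abcdef;
      int shift = 67;  // 67 & 0x3f == 3, so this encodes a shift by 3
      assert((v << (shift & 0x3f)) == (v << 3));
      assert((static_cast<uint64_t>(v) >> (shift & 0x3f)) ==
             (static_cast<uint64_t>(v) >> 3));  // logical shift, like kOpLsr
      return 0;
    }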
-void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- int flags) {
+void MipsMir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2, int flags) {
// Default - bail to non-const handler.
GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
}
+void MipsMir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
+ if (!cu_->target64) {
+ Mir2Lir::GenIntToLong(rl_dest, rl_src);
+ return;
+ }
+ rl_src = LoadValue(rl_src, kCoreReg);
+ RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
+ NewLIR3(kMipsSll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
+ StoreValueWide(rl_dest, rl_result);
+}
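
GenIntToLong above relies on a MIPS64 property: a 32-bit operation such as sll writes its result sign-extended into the full 64-bit register, so `sll result, src, 0` is the whole int-to-long conversion. A C++ model of the value transformation (the function name is illustrative; the int32_t cast is two's-complement in practice, implementation-defined before C++20):

    #include <cassert>
    #include <cstdint>

    // What `sll rd, rs, 0` leaves in rd on MIPS64: the low 32 bits of rs,
    // sign-extended to 64 bits.
    static int64_t SllZero(uint64_t rs) {
      return static_cast<int64_t>(static_cast<int32_t>(rs & 0xffffffffu));
    }

    int main() {
      assert(SllZero(0x7fffffffu) == 0x7fffffffll);
      assert(SllZero(0x80000000u) == -0x80000000ll);  // high bit set -> negative
      return 0;
    }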
+
+void MipsMir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
+ RegLocation rl_src, RegisterClass reg_class) {
+ FlushAllRegs(); // Send everything to home location.
+ CallRuntimeHelperRegLocation(trampoline, rl_src, false);
+ if (rl_dest.wide) {
+ RegLocation rl_result;
+ rl_result = GetReturnWide(reg_class);
+ StoreValueWide(rl_dest, rl_result);
+ } else {
+ RegLocation rl_result;
+ rl_result = GetReturn(reg_class);
+ StoreValue(rl_dest, rl_result);
+ }
+}
+
} // namespace art
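
The three-instruction mips64 sequence documented at the top of this file (slt temp, x, y; slt res, y, x; subu res, res, temp) is the branchless compare (x > y) - (x < y). A small C++ model, assuming only that slt materializes its comparison as 0 or 1:

    #include <cassert>
    #include <cstdint>

    static int32_t CmpLong(int64_t x, int64_t y) {
      int32_t lt = (x < y) ? 1 : 0;  // slt temp, x, y
      int32_t gt = (y < x) ? 1 : 0;  // slt res,  y, x
      return gt - lt;                // subu res, res, temp -> -1, 0, or 1
    }

    int main() {
      assert(CmpLong(1, 2) == -1);
      assert(CmpLong(2, 1) == 1);
      assert(CmpLong(5, 5) == 0);
      return 0;
    }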
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 70370559bc..078ac0a2ad 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -25,25 +25,29 @@ namespace art {
/*
* Runtime register conventions.
*
- * zero is always the value 0
- * at is scratch (normally used as temp reg by assembler)
- * v0, v1 are scratch (normally hold subroutine return values)
- * a0-a3 are scratch (normally hold subroutine arguments)
- * t0-t8 are scratch
- * t9 is scratch (normally used for function calls)
- * s0 (rMIPS_SUSPEND) is reserved [holds suspend-check counter]
- * s1 (rMIPS_SELF) is reserved [holds current &Thread]
- * s2-s7 are callee save (promotion target)
- * k0, k1 are reserved for use by interrupt handlers
- * gp is reserved for global pointer
- * sp is reserved
- * s8 is callee save (promotion target)
- * ra is scratch (normally holds the return addr)
+ * mips32 | mips64
+ * $0: zero is always the value 0
+ * $1: at is scratch (normally used as temp reg by assembler)
+ * $2,$3: v0, v1 are scratch (normally hold subroutine return values)
+ * $4-$7: a0-a3 are scratch (normally hold subroutine arguments)
+ * $8-$11: t0-t3 are scratch | a4-a7 are scratch (normally hold subroutine arguments)
+ * $12-$15: t4-t7 are scratch | t0-t3 are scratch
+ * $16: s0 (rSUSPEND) is reserved [holds suspend-check counter]
+ * $17: s1 (rSELF) is reserved [holds current &Thread]
+ * $18-$23: s2-s7 are callee save (promotion target)
+ * $24: t8 is scratch
+ * $25: t9 is scratch (normally used for function calls)
+ * $26,$27: k0, k1 are reserved for use by interrupt handlers
+ * $28: gp is reserved for global pointer
+ * $29: sp is reserved
+ * $30: s8 is callee save (promotion target)
+ * $31: ra is scratch (normally holds the return addr)
*
* Preserved across C calls: s0-s8
- * Trashed across C calls: at, v0-v1, a0-a3, t0-t9, gp, ra
+ * Trashed across C calls (mips32): at, v0-v1, a0-a3, t0-t9, gp, ra
+ * Trashed across C calls (mips64): at, v0-v1, a0-a7, t0-t3, t8, t9, gp, ra
*
- * Floating pointer registers
+ * Floating point registers (mips32)
 * NOTE: there are 32 fp registers (16 df pairs), but currently
 * only 16 fp registers (8 df pairs) are supported.
* f0-f15
@@ -51,14 +55,23 @@ namespace art {
*
* f0-f15 (df0-df7) trashed across C calls
*
+ * Floating point registers (mips64)
+ * NOTE: there are 32 fp registers.
+ * f0-f31
+ *
* For mips32 code use:
* a0-a3 to hold operands
* v0-v1 to hold results
* t0-t9 for temps
*
+ * For mips64 code use:
+ * a0-a7 to hold operands
+ * v0-v1 to hold results
+ * t0-t3, t8-t9 for temps
+ *
 * Every jump/branch instruction is followed by a delay slot.
*
- * Stack frame diagram (stack grows down, higher addresses at top):
+ * Stack frame diagram (stack grows down, higher addresses at top):
*
* +------------------------+
* | IN[ins-1] | {Note: resides in caller's frame}
@@ -90,18 +103,6 @@ namespace art {
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4
-#define rARG0 rA0
-#define rs_rARG0 rs_rA0
-#define rARG1 rA1
-#define rs_rARG1 rs_rA1
-#define rARG2 rA2
-#define rs_rARG2 rs_rA2
-#define rARG3 rA3
-#define rs_rARG3 rs_rA3
-#define rRESULT0 rV0
-#define rs_rRESULT0 rs_rV0
-#define rRESULT1 rV1
-#define rs_rRESULT1 rs_rV1
#define rFARG0 rF12
#define rs_rFARG0 rs_rF12
@@ -111,14 +112,6 @@ namespace art {
#define rs_rFARG2 rs_rF14
#define rFARG3 rF15
#define rs_rFARG3 rs_rF15
-#define rFRESULT0 rF0
-#define rs_rFRESULT0 rs_rF0
-#define rFRESULT1 rF1
-#define rs_rFRESULT1 rs_rF1
-
-// Regs not used for Mips.
-#define rMIPS_LR RegStorage::kInvalidRegVal
-#define rMIPS_PC RegStorage::kInvalidRegVal
enum MipsResourceEncodingPos {
kMipsGPReg0 = 0,
@@ -130,6 +123,10 @@ enum MipsResourceEncodingPos {
kMipsRegLO,
kMipsRegPC,
kMipsRegEnd = 51,
+ // Mips64 related:
+ kMips64FPRegEnd = 64,
+ kMips64RegPC = kMips64FPRegEnd,
+ kMips64RegEnd = 65,
};
#define ENCODE_MIPS_REG_LIST(N) (static_cast<uint64_t>(N))
@@ -144,38 +141,78 @@ enum MipsResourceEncodingPos {
#define FR_BIT 0
enum MipsNativeRegisterPool { // private marker to avoid generate-operator-out.py from processing.
- rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
- rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
- rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
- rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
- rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
- rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
- rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
- rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
- rT0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
- rT1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
- rT2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
- rT3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
- rT4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
- rT5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
- rT6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
- rT7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
- rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
- rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
- rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
- rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
- rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
- rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
- rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
- rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
- rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
- rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
- rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
- rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
- rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
- rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
- rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
- rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+ rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
+ rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
+ rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
+ rATd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
+ rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
+ rV0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
+ rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
+ rV1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
+ rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
+ rA0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
+ rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
+ rA1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
+ rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
+ rA2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
+ rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
+ rA3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
+ rT0_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
+ rA4 = rT0_32,
+ rA4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
+ rT1_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
+ rA5 = rT1_32,
+ rA5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
+ rT2_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
+ rA6 = rT2_32,
+ rA6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
+ rT3_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
+ rA7 = rT3_32,
+ rA7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
+ rT4_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
+ rT0 = rT4_32,
+ rT0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
+ rT5_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
+ rT1 = rT5_32,
+ rT1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
+ rT6_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
+ rT2 = rT6_32,
+ rT2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
+ rT7_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
+ rT3 = rT7_32,
+ rT3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
+ rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
+ rS0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
+ rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
+ rS1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
+ rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
+ rS2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
+ rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
+ rS3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
+ rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
+ rS4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
+ rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
+ rS5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
+ rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
+ rS6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
+ rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
+ rS7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
+ rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
+ rT8d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
+ rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
+ rT9d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
+ rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
+ rK0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
+ rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
+ rK1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
+ rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
+ rGPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
+ rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
+ rSPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
+ rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
+ rFPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
+ rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
+ rRAd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
rF0 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
rF1 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
@@ -193,6 +230,24 @@ enum MipsNativeRegisterPool { // private marker to avoid generate-operator-out.
rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
+
+ rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
+ rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
+ rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
+ rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
+ rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
+ rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
+ rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
+ rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
+ rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
+ rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
+ rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
+ rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
+ rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
+ rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
+ rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
+ rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
+
#if 0
/*
* TODO: The shared resource mask doesn't have enough bit positions to describe all
@@ -253,6 +308,39 @@ enum MipsNativeRegisterPool { // private marker to avoid generate-operator-out.
rD14_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
rD15_fr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
#endif
+
+ rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
+ rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
+ rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
+ rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
+ rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
+ rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
+ rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
+ rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
+ rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
+ rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
+ rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
+ rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
+ rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
+ rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
+ rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
+ rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
+ rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
+ rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
+ rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
+ rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
+ rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
+ rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
+ rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
+ rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
+ rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
+ rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
+ rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
+ rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
+ rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
+ rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
+ rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
+ rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
};
constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
@@ -263,14 +351,22 @@ constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
-constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
-constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
-constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
-constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
-constexpr RegStorage rs_rT4(RegStorage::kValid | rT4);
-constexpr RegStorage rs_rT5(RegStorage::kValid | rT5);
-constexpr RegStorage rs_rT6(RegStorage::kValid | rT6);
-constexpr RegStorage rs_rT7(RegStorage::kValid | rT7);
+constexpr RegStorage rs_rT0_32(RegStorage::kValid | rT0_32);
+constexpr RegStorage rs_rA4 = rs_rT0_32;
+constexpr RegStorage rs_rT1_32(RegStorage::kValid | rT1_32);
+constexpr RegStorage rs_rA5 = rs_rT1_32;
+constexpr RegStorage rs_rT2_32(RegStorage::kValid | rT2_32);
+constexpr RegStorage rs_rA6 = rs_rT2_32;
+constexpr RegStorage rs_rT3_32(RegStorage::kValid | rT3_32);
+constexpr RegStorage rs_rA7 = rs_rT3_32;
+constexpr RegStorage rs_rT4_32(RegStorage::kValid | rT4_32);
+constexpr RegStorage rs_rT0 = rs_rT4_32;
+constexpr RegStorage rs_rT5_32(RegStorage::kValid | rT5_32);
+constexpr RegStorage rs_rT1 = rs_rT5_32;
+constexpr RegStorage rs_rT6_32(RegStorage::kValid | rT6_32);
+constexpr RegStorage rs_rT2 = rs_rT6_32;
+constexpr RegStorage rs_rT7_32(RegStorage::kValid | rT7_32);
+constexpr RegStorage rs_rT3 = rs_rT7_32;
constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
@@ -288,9 +384,38 @@ constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
-constexpr RegStorage rs_rMIPS_LR(RegStorage::kInvalid); // Not used for MIPS.
-constexpr RegStorage rs_rMIPS_PC(RegStorage::kInvalid); // Not used for MIPS.
-constexpr RegStorage rs_rMIPS_COUNT(RegStorage::kInvalid); // Not used for MIPS.
+constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
+constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
+constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
+constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
+constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
+constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
+constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
+constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
+constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
+constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
+constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
+constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
+constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
+constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
+constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
+constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
+constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
+constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
+constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
+constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
+constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
+constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
+constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
+constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
+constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
+constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
+constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
+constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
+constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
+constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
+constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
+constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
@@ -309,6 +434,23 @@ constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
+constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
+constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
+constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
+constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
+constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
+constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
+constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
+constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
+constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
+constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
+constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
+constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
+constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
+constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
+constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
+constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
+
constexpr RegStorage rs_rD0_fr0(RegStorage::kValid | rD0_fr0);
constexpr RegStorage rs_rD1_fr0(RegStorage::kValid | rD1_fr0);
constexpr RegStorage rs_rD2_fr0(RegStorage::kValid | rD2_fr0);
@@ -327,53 +469,65 @@ constexpr RegStorage rs_rD5_fr1(RegStorage::kValid | rD5_fr1);
constexpr RegStorage rs_rD6_fr1(RegStorage::kValid | rD6_fr1);
constexpr RegStorage rs_rD7_fr1(RegStorage::kValid | rD7_fr1);
-// TODO: reduce/eliminate use of these.
-#define rMIPS_SUSPEND rS0
-#define rs_rMIPS_SUSPEND rs_rS0
-#define rMIPS_SELF rS1
-#define rs_rMIPS_SELF rs_rS1
-#define rMIPS_SP rSP
-#define rs_rMIPS_SP rs_rSP
-#define rMIPS_ARG0 rARG0
-#define rs_rMIPS_ARG0 rs_rARG0
-#define rMIPS_ARG1 rARG1
-#define rs_rMIPS_ARG1 rs_rARG1
-#define rMIPS_ARG2 rARG2
-#define rs_rMIPS_ARG2 rs_rARG2
-#define rMIPS_ARG3 rARG3
-#define rs_rMIPS_ARG3 rs_rARG3
-#define rMIPS_FARG0 rFARG0
-#define rs_rMIPS_FARG0 rs_rFARG0
-#define rMIPS_FARG1 rFARG1
-#define rs_rMIPS_FARG1 rs_rFARG1
-#define rMIPS_FARG2 rFARG2
-#define rs_rMIPS_FARG2 rs_rFARG2
-#define rMIPS_FARG3 rFARG3
-#define rs_rMIPS_FARG3 rs_rFARG3
-#define rMIPS_RET0 rRESULT0
-#define rs_rMIPS_RET0 rs_rRESULT0
-#define rMIPS_RET1 rRESULT1
-#define rs_rMIPS_RET1 rs_rRESULT1
-#define rMIPS_INVOKE_TGT rT9
-#define rs_rMIPS_INVOKE_TGT rs_rT9
-#define rMIPS_COUNT RegStorage::kInvalidRegVal
+constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
+constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
+constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
+constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
+constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
+constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
+constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
+constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
+constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
+constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
+constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
+constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
+constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
+constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
+constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
+constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
+constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
+constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
+constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
+constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
+constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
+constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
+constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
+constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
+constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
+constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
+constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
+constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
+constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
+constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
+constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
+constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation mips_loc_c_return
{kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_ref
+ {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_wide
{kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
RegStorage(RegStorage::k64BitPair, rV0, rV1), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_wide
+ {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_float
{kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+// FIXME: move MIPS to k64BitSolo for doubles.
const RegLocation mips_loc_c_return_double_fr0
{kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
RegStorage(RegStorage::k64BitPair, rF0, rF1), INVALID_SREG, INVALID_SREG};
const RegLocation mips_loc_c_return_double_fr1
{kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
RegStorage(RegStorage::k64BitSolo, rF0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips64_loc_c_return_double
+ {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
enum MipsShiftEncodings {
kMipsLsl = 0x0,
@@ -395,104 +549,136 @@ enum MipsShiftEncodings {
#define kSY kSYNC0
/*
- * The following enum defines the list of supported Thumb instructions by the
+ * The following enum defines the list of MIPS instructions supported by the
* assembler. Their corresponding EncodingMap positions will be defined in
- * Assemble.cc.
+ * assemble_mips.cc.
*/
enum MipsOpCode {
kMipsFirst = 0,
+ // The following are common mips32r2, mips32r6 and mips64r6 instructions.
kMips32BitData = kMipsFirst, // data [31..0].
- kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMipsAddu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
- kMipsAnd, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
- kMipsAndi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
- kMipsB, // b o [0001000000000000] o[15..0].
- kMipsBal, // bal o [0000010000010001] o[15..0].
- // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
- // range may require updates.
- kMipsBeq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
- kMipsBeqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
- kMipsBgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
- kMipsBgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
- kMipsBlez, // blez s,o [000110] s[25..21] [00000] o[15..0].
- kMipsBltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
- kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
- kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
- kMipsDiv, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
- kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
- kMipsJal, // jal t [000011] t[25..0].
- kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
- kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
- kMipsLahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
- kMipsLalo, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
- kMipsLui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
- kMipsLb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
- kMipsLbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
- kMipsLh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
- kMipsLhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
- kMipsLw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
- kMipsMfhi, // mfhi d [0000000000000000] d[15..11] [00000010000].
- kMipsMflo, // mflo d [0000000000000000] d[15..11] [00000010010].
- kMipsMove, // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
- kMipsMovz, // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
- kMipsMul, // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
- kMipsNop, // nop [00000000000000000000000000000000].
- kMipsNor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
- kMipsOr, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
- kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMipsPref, // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
- kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
- kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
- kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
- kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
- kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
- kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
- kMipsSlt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
- kMipsSlti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
- kMipsSltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
- kMipsSra, // sra d,s,imm5 [00000000000] t[20..16] d[15..11] imm5[10..6] [000011].
- kMipsSrav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
- kMipsSrl, // srl d,t,a [00000000000] t[20..16] d[20..16] a[10..6] [000010].
- kMipsSrlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
- kMipsSubu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
- kMipsSw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
- kMipsXor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
- kMipsXori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
- kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
- kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
- kMipsFmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
- kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
- kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
- kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
- kMipsFmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
- kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
- kMipsFcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
- kMipsFcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
- kMipsFcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
- kMipsFcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
- kMipsFcvtws, // cvt.w.d d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
- kMipsFcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
- kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
- kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
- kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
- kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
- kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
- kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
- kMipsMfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
- kMipsMtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
- kMipsMfhc1, // mfhc1 t,s [01000100011] t[20..16] s[15..11] [00000000000].
- kMipsMthc1, // mthc1 t,s [01000100111] t[20..16] s[15..11] [00000000000].
- kMipsDelta, // Psuedo for ori t, s, <label>-<label>.
- kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
- kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
- kMipsCurrPC, // jal to .+8 to materialize pc.
- kMipsSync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
-
- // The following are mips32r6 instructions.
- kMipsR6Div, // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
- kMipsR6Mod, // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
- kMipsR6Mul, // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
-
+ kMipsAddiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+ kMipsAddu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
+ kMipsAnd, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
+ kMipsAndi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
+ kMipsB, // b o [0001000000000000] o[15..0].
+ kMipsBal, // bal o [0000010000010001] o[15..0].
+  // NOTE: the code tests the range kMipsBeq thru kMipsBne, so adding an instruction in this
+ // range may require updates.
+ kMipsBeq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
+ kMipsBeqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
+ kMipsBgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
+ kMipsBgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
+ kMipsBlez, // blez s,o [000110] s[25..21] [00000] o[15..0].
+ kMipsBltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
+ kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
+ kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
+ kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
+ kMipsFaddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
+ kMipsFadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
+ kMipsFsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
+ kMipsFsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
+ kMipsFdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
+ kMipsFdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
+  kMipsFmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
+  kMipsFmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
+ kMipsFcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
+ kMipsFcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
+ kMipsFcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
+ kMipsFcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
+ kMipsFcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
+ kMipsFcvtws, // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
+ kMipsFmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
+ kMipsFmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
+ kMipsFnegd, // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
+ kMipsFnegs, // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
+ kMipsFldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
+ kMipsFlwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
+ kMipsFsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
+ kMipsFswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
+ kMipsJal, // jal t [000011] t[25..0].
+ kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
+ kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
+ kMipsLahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
+ kMipsLalo, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
+ kMipsLui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
+ kMipsLb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
+ kMipsLbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
+ kMipsLh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
+ kMipsLhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
+ kMipsLw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
+ kMipsMove, // move d,s [000000] s[25..21] [00000] d[15..11] [00000100101].
+ kMipsMfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
+ kMipsMtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
+ kMipsMfhc1, // mfhc1 t,s [01000100011] t[20..16] s[15..11] [00000000000].
+ kMipsMthc1, // mthc1 t,s [01000100111] t[20..16] s[15..11] [00000000000].
+ kMipsNop, // nop [00000000000000000000000000000000].
+ kMipsNor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
+ kMipsOr, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
+ kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
+  kMipsPref, // pref h,o(b) [110011] b[25..21] h[20..16] o[15..0].
+ kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
+ kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
+ kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
+ kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
+ kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
+ kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
+ kMipsSlt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
+ kMipsSlti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
+ kMipsSltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
+  kMipsSra, // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
+ kMipsSrav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
+  kMipsSrl, // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
+ kMipsSrlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
+ kMipsSubu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
+ kMipsSw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
+ kMipsSync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
+ kMipsXor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
+ kMipsXori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
+
+ // The following are mips32r2 instructions.
+ kMipsR2Div, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
+ kMipsR2Mul, // mul d,s,t [011100] s[25..21] t[20..16] d[15..11] [00000000010].
+ kMipsR2Mfhi, // mfhi d [0000000000000000] d[15..11] [00000010000].
+ kMipsR2Mflo, // mflo d [0000000000000000] d[15..11] [00000010010].
+ kMipsR2Movz, // movz d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000001010].
+
+ // The following are mips32r6 and mips64r6 instructions.
+ kMipsR6Div, // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
+ kMipsR6Mod, // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
+ kMipsR6Mul, // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
+
+ // The following are mips64r6 instructions.
+  kMips64Daddiu, // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
+  kMips64Daddu, // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
+  kMips64Dahi, // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
+  kMips64Dati, // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
+  kMips64Daui, // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
+ kMips64Ddiv, // ddiv d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
+ kMips64Dmod, // dmod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
+ kMips64Dmul, // dmul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
+ kMips64Dmfc1, // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
+ kMips64Dmtc1, // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
+ kMips64Drotr32, // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsll, // dsll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
+ kMips64Dsll32, // dsll32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
+ kMips64Dsrl, // dsrl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
+ kMips64Dsrl32, // dsrl32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
+ kMips64Dsra, // dsra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
+ kMips64Dsra32, // dsra32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
+ kMips64Dsllv, // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
+ kMips64Dsrlv, // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
+ kMips64Dsrav, // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
+ kMips64Dsubu, // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
+ kMips64Ld, // ld t,o(b) [110111] b[25..21] t[20..16] o[15..0].
+ kMips64Lwu, // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
+ kMips64Sd, // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
+
+ // The following are pseudoinstructions.
+  kMipsDelta, // Pseudo for ori t, s, <label>-<label>.
+ kMipsDeltaHi, // Pseudo for lui t, high16(<label>-<label>).
+ kMipsDeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
+ kMipsCurrPC, // jal to .+8 to materialize pc.
kMipsUndefined, // undefined [011001xxxxxxxxxxxxxxxx].
kMipsLast
};
@@ -503,7 +689,7 @@ enum MipsEncodingKind {
kFmtUnused,
kFmtBitBlt, // Bit string using end/start.
kFmtDfp, // Double FP reg.
- kFmtSfp, // Single FP reg
+ kFmtSfp, // Single FP reg.
kFmtBlt5_2, // Same 5-bit field to 2 locations.
};
std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 830f63ac5f..a94fad7534 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -30,55 +30,131 @@
namespace art {
-static constexpr RegStorage core_regs_arr[] =
- {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2,
- rs_rT3, rs_rT4, rs_rT5, rs_rT6, rs_rT7, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
- rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
-static constexpr RegStorage sp_regs_arr[] =
+static constexpr RegStorage core_regs_arr_32[] =
+ {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32,
+ rs_rT2_32, rs_rT3_32, rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rS0, rs_rS1, rs_rS2,
+ rs_rS3, rs_rS4, rs_rS5, rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP,
+ rs_rRA};
+static constexpr RegStorage sp_regs_arr_32[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_fr0_regs_arr[] =
+static constexpr RegStorage dp_fr0_regs_arr_32[] =
{rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_regs_arr[] =
+static constexpr RegStorage dp_fr1_regs_arr_32[] =
{rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
rs_rD7_fr1};
-static constexpr RegStorage reserved_regs_arr[] =
+static constexpr RegStorage reserved_regs_arr_32[] =
{rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
-static constexpr RegStorage core_temps_arr[] =
- {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rT4,
- rs_rT5, rs_rT6, rs_rT7, rs_rT8};
-static constexpr RegStorage sp_temps_arr[] =
+static constexpr RegStorage core_temps_arr_32[] =
+ {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rT0_32, rs_rT1_32, rs_rT2_32, rs_rT3_32,
+ rs_rT4_32, rs_rT5_32, rs_rT6_32, rs_rT7_32, rs_rT8};
+static constexpr RegStorage sp_temps_arr_32[] =
{rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15};
-static constexpr RegStorage dp_fr0_temps_arr[] =
+static constexpr RegStorage dp_fr0_temps_arr_32[] =
{rs_rD0_fr0, rs_rD1_fr0, rs_rD2_fr0, rs_rD3_fr0, rs_rD4_fr0, rs_rD5_fr0, rs_rD6_fr0,
rs_rD7_fr0};
-static constexpr RegStorage dp_fr1_temps_arr[] =
+static constexpr RegStorage dp_fr1_temps_arr_32[] =
{rs_rD0_fr1, rs_rD1_fr1, rs_rD2_fr1, rs_rD3_fr1, rs_rD4_fr1, rs_rD5_fr1, rs_rD6_fr1,
rs_rD7_fr1};
+static constexpr RegStorage core_regs_arr_64[] =
+ {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
+ rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5, rs_rS6,
+ rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
+static constexpr RegStorage core_regs_arr_64d[] =
+ {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
+ rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
+ rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
+ rs_rFPd, rs_rRAd};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_regs_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_regs_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_regs_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_regs_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+static constexpr RegStorage reserved_regs_arr_64[] =
+ {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
+static constexpr RegStorage reserved_regs_arr_64d[] =
+ {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
+static constexpr RegStorage core_temps_arr_64[] =
+ {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0, rs_rT1,
+ rs_rT2, rs_rT3, rs_rT8};
+static constexpr RegStorage core_temps_arr_64d[] =
+ {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
+ rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
+#if 0
+// TODO: f24-f31 must be saved before calls and restored after.
+static constexpr RegStorage sp_temps_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
+ rs_rF31};
+static constexpr RegStorage dp_temps_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
+ rs_rD31};
+#else
+static constexpr RegStorage sp_temps_arr_64[] =
+ {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
+ rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
+ rs_rF21, rs_rF22, rs_rF23};
+static constexpr RegStorage dp_temps_arr_64[] =
+ {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
+ rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
+ rs_rD21, rs_rD22, rs_rD23};
+#endif
+
static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
-static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr0_regs(dp_fr0_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr1_regs(dp_fr1_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
-static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
-static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr0_temps(dp_fr0_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_fr1_temps(dp_fr1_temps_arr);
+static constexpr ArrayRef<const RegStorage> core_regs_32(core_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_regs_32(sp_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr0_regs_32(dp_fr0_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr1_regs_32(dp_fr1_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> reserved_regs_32(reserved_regs_arr_32);
+static constexpr ArrayRef<const RegStorage> core_temps_32(core_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> sp_temps_32(sp_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr0_temps_32(dp_fr0_temps_arr_32);
+static constexpr ArrayRef<const RegStorage> dp_fr1_temps_32(dp_fr1_temps_arr_32);
+
+static constexpr ArrayRef<const RegStorage> core_regs_64(core_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> core_regs_64d(core_regs_arr_64d);
+static constexpr ArrayRef<const RegStorage> sp_regs_64(sp_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> dp_regs_64(dp_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> reserved_regs_64(reserved_regs_arr_64);
+static constexpr ArrayRef<const RegStorage> reserved_regs_64d(reserved_regs_arr_64d);
+static constexpr ArrayRef<const RegStorage> core_temps_64(core_temps_arr_64);
+static constexpr ArrayRef<const RegStorage> core_temps_64d(core_temps_arr_64d);
+static constexpr ArrayRef<const RegStorage> sp_temps_64(sp_temps_arr_64);
+static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
RegLocation MipsMir2Lir::LocCReturn() {
return mips_loc_c_return;
}
RegLocation MipsMir2Lir::LocCReturnRef() {
- return mips_loc_c_return;
+ return cu_->target64 ? mips64_loc_c_return_ref : mips_loc_c_return;
}
RegLocation MipsMir2Lir::LocCReturnWide() {
- return mips_loc_c_return_wide;
+ return cu_->target64 ? mips64_loc_c_return_wide : mips_loc_c_return_wide;
}
RegLocation MipsMir2Lir::LocCReturnFloat() {
@@ -86,14 +162,16 @@ RegLocation MipsMir2Lir::LocCReturnFloat() {
}
RegLocation MipsMir2Lir::LocCReturnDouble() {
- if (fpuIs32Bit_) {
- return mips_loc_c_return_double_fr0;
+ if (cu_->target64) {
+ return mips64_loc_c_return_double;
+ } else if (fpuIs32Bit_) {
+ return mips_loc_c_return_double_fr0;
} else {
- return mips_loc_c_return_double_fr1;
+ return mips_loc_c_return_double_fr1;
}
}
-// Convert k64BitSolo into k64BitPair
+// Convert k64BitSolo into k64BitPair.
RegStorage MipsMir2Lir::Solo64ToPair64(RegStorage reg) {
DCHECK(reg.IsDouble());
DCHECK_EQ(reg.GetRegNum() & 1, 0);
@@ -113,16 +191,18 @@ RegStorage MipsMir2Lir::Fp64ToSolo32(RegStorage reg) {
// Return a target-dependent special register.
RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
- if (wide_kind == kWide) {
- DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
- RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
- TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
- if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
- // convert 64BitPair to 64BitSolo for 64bit FPUs.
- RegStorage low = ret_reg.GetLow();
- ret_reg = RegStorage::FloatSolo64(low.GetRegNum());
- }
- return ret_reg;
+ if (!cu_->target64 && wide_kind == kWide) {
+ DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
+ RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
+ TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
+ if (!fpuIs32Bit_ && ret_reg.IsFloat()) {
+ // convert 64BitPair to 64BitSolo for 64bit FPUs.
+ RegStorage low = ret_reg.GetLow();
+ ret_reg = RegStorage::FloatSolo64(low.GetRegNum());
+ }
+ return ret_reg;
+ } else if (cu_->target64 && (wide_kind == kWide || wide_kind == kRef)) {
+ return As64BitReg(TargetReg(reg));
} else {
return TargetReg(reg);
}
@@ -132,25 +212,33 @@ RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg, WideKind wide_kind)
RegStorage MipsMir2Lir::TargetReg(SpecialTargetRegister reg) {
RegStorage res_reg;
switch (reg) {
- case kSelf: res_reg = rs_rMIPS_SELF; break;
- case kSuspend: res_reg = rs_rMIPS_SUSPEND; break;
- case kLr: res_reg = rs_rMIPS_LR; break;
- case kPc: res_reg = rs_rMIPS_PC; break;
- case kSp: res_reg = rs_rMIPS_SP; break;
- case kArg0: res_reg = rs_rMIPS_ARG0; break;
- case kArg1: res_reg = rs_rMIPS_ARG1; break;
- case kArg2: res_reg = rs_rMIPS_ARG2; break;
- case kArg3: res_reg = rs_rMIPS_ARG3; break;
- case kFArg0: res_reg = rs_rMIPS_FARG0; break;
- case kFArg1: res_reg = rs_rMIPS_FARG1; break;
- case kFArg2: res_reg = rs_rMIPS_FARG2; break;
- case kFArg3: res_reg = rs_rMIPS_FARG3; break;
- case kRet0: res_reg = rs_rMIPS_RET0; break;
- case kRet1: res_reg = rs_rMIPS_RET1; break;
- case kInvokeTgt: res_reg = rs_rMIPS_INVOKE_TGT; break;
- case kHiddenArg: res_reg = rs_rT0; break;
+ case kSelf: res_reg = rs_rS1; break;
+ case kSuspend: res_reg = rs_rS0; break;
+ case kLr: res_reg = rs_rRA; break;
+ case kPc: res_reg = RegStorage::InvalidReg(); break;
+ case kSp: res_reg = rs_rSP; break;
+ case kArg0: res_reg = rs_rA0; break;
+ case kArg1: res_reg = rs_rA1; break;
+ case kArg2: res_reg = rs_rA2; break;
+ case kArg3: res_reg = rs_rA3; break;
+ case kArg4: res_reg = cu_->target64 ? rs_rA4 : RegStorage::InvalidReg(); break;
+ case kArg5: res_reg = cu_->target64 ? rs_rA5 : RegStorage::InvalidReg(); break;
+ case kArg6: res_reg = cu_->target64 ? rs_rA6 : RegStorage::InvalidReg(); break;
+ case kArg7: res_reg = cu_->target64 ? rs_rA7 : RegStorage::InvalidReg(); break;
+ case kFArg0: res_reg = rs_rF12; break;
+ case kFArg1: res_reg = rs_rF13; break;
+ case kFArg2: res_reg = rs_rF14; break;
+ case kFArg3: res_reg = rs_rF15; break;
+ case kFArg4: res_reg = cu_->target64 ? rs_rF16 : RegStorage::InvalidReg(); break;
+ case kFArg5: res_reg = cu_->target64 ? rs_rF17 : RegStorage::InvalidReg(); break;
+ case kFArg6: res_reg = cu_->target64 ? rs_rF18 : RegStorage::InvalidReg(); break;
+ case kFArg7: res_reg = cu_->target64 ? rs_rF19 : RegStorage::InvalidReg(); break;
+ case kRet0: res_reg = rs_rV0; break;
+ case kRet1: res_reg = rs_rV1; break;
+ case kInvokeTgt: res_reg = rs_rT9; break;
+ case kHiddenArg: res_reg = cu_->target64 ? rs_rT0 : rs_rT0_32; break;
case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
- case kCount: res_reg = rs_rMIPS_COUNT; break;
+ case kCount: res_reg = RegStorage::InvalidReg(); break;
default: res_reg = RegStorage::InvalidReg();
}
return res_reg;
@@ -172,27 +260,54 @@ RegStorage MipsMir2Lir::InToRegStorageMipsMapper::GetNextReg(ShortyArg arg) {
return result;
}
+RegStorage MipsMir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
+ const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
+ {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
+ const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+ const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
+ {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
+ const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+
+ RegStorage result = RegStorage::InvalidReg();
+ if (arg.IsFP()) {
+ if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
+ DCHECK(!arg.IsRef());
+ result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsWide() ? kWide : kNotWide);
+ }
+ } else {
+ if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
+ DCHECK(!(arg.IsWide() && arg.IsRef()));
+ result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
+ arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
+ }
+ }
+ return result;
+}
+
/*
* Decode the register id.
*/
ResourceMask MipsMir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
- if (reg.IsDouble()) {
- return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
- } else if (reg.IsSingle()) {
- return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+ if (cu_->target64) {
+ return ResourceMask::Bit((reg.IsFloat() ? kMipsFPReg0 : 0) + reg.GetRegNum());
} else {
- return ResourceMask::Bit(reg.GetRegNum());
+ if (reg.IsDouble()) {
+ return ResourceMask::TwoBits((reg.GetRegNum() & ~1) + kMipsFPReg0);
+ } else if (reg.IsSingle()) {
+ return ResourceMask::Bit(reg.GetRegNum() + kMipsFPReg0);
+ } else {
+ return ResourceMask::Bit(reg.GetRegNum());
+ }
}
}
ResourceMask MipsMir2Lir::GetPCUseDefEncoding() const {
- return ResourceMask::Bit(kMipsRegPC);
+ return cu_->target64 ? ResourceMask::Bit(kMips64RegPC) : ResourceMask::Bit(kMipsRegPC);
}
-
-void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
- ResourceMask* use_mask, ResourceMask* def_mask) {
- DCHECK_EQ(cu_->instruction_set, kMips);
+void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
+ ResourceMask* def_mask) {
DCHECK(!lir->flags.use_def_invalid);
// Mips-specific resource map setup here.
@@ -208,20 +323,22 @@ void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
def_mask->SetBit(kMipsRegLR);
}
- if (flags & REG_DEF_HI) {
- def_mask->SetBit(kMipsRegHI);
- }
+ if (!cu_->target64) {
+ if (flags & REG_DEF_HI) {
+ def_mask->SetBit(kMipsRegHI);
+ }
- if (flags & REG_DEF_LO) {
- def_mask->SetBit(kMipsRegLO);
- }
+ if (flags & REG_DEF_LO) {
+ def_mask->SetBit(kMipsRegLO);
+ }
- if (flags & REG_USE_HI) {
- use_mask->SetBit(kMipsRegHI);
- }
+ if (flags & REG_USE_HI) {
+ use_mask->SetBit(kMipsRegHI);
+ }
- if (flags & REG_USE_LO) {
- use_mask->SetBit(kMipsRegLO);
+ if (flags & REG_USE_LO) {
+ use_mask->SetBit(kMipsRegLO);
+ }
}
}
@@ -234,9 +351,16 @@ static const char *mips_reg_name[MIPS_REG_COUNT] = {
"t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
};
+static const char *mips64_reg_name[MIPS_REG_COUNT] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
+};
+
/*
* Interpret a format string and build a string no longer than size
- * See format key in Assemble.c.
+ * See format key in assemble_mips.cc.
*/
std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
std::string buf;
@@ -311,7 +435,11 @@ std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned cha
break;
case 'r':
DCHECK(operand >= 0 && operand < MIPS_REG_COUNT);
- strcpy(tbuf, mips_reg_name[operand]);
+ if (cu_->target64) {
+ strcpy(tbuf, mips64_reg_name[operand]);
+ } else {
+ strcpy(tbuf, mips_reg_name[operand]);
+ }
break;
case 'N':
// Placeholder for delay slot handling
@@ -330,7 +458,7 @@ std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned cha
return buf;
}
-// FIXME: need to redo resource maps for MIPS - fix this at that time
+// FIXME: need to redo resource maps for MIPS - fix this at that time.
void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, const char *prefix) {
char buf[256];
buf[0] = 0;
@@ -341,7 +469,7 @@ void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, cons
char num[8];
int i;
- for (i = 0; i < kMipsRegEnd; i++) {
+ for (i = 0; i < (cu_->target64 ? kMips64RegEnd : kMipsRegEnd); i++) {
if (mask.HasBit(i)) {
snprintf(num, arraysize(num), "%d ", i);
strcat(buf, num);
@@ -354,7 +482,7 @@ void MipsMir2Lir::DumpResourceMask(LIR *mips_lir, const ResourceMask& mask, cons
if (mask.HasBit(ResourceMask::kFPStatus)) {
strcat(buf, "fpcc ");
}
- /* Memory bits */
+ // Memory bits.
if (mips_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
DECODE_ALIAS_INFO_REG(mips_lir->flags.alias_info),
@@ -389,63 +517,114 @@ void MipsMir2Lir::AdjustSpillMask() {
/* Clobber all regs that might be used by an external C call */
void MipsMir2Lir::ClobberCallerSave() {
- Clobber(rs_rZERO);
- Clobber(rs_rAT);
- Clobber(rs_rV0);
- Clobber(rs_rV1);
- Clobber(rs_rA0);
- Clobber(rs_rA1);
- Clobber(rs_rA2);
- Clobber(rs_rA3);
- Clobber(rs_rT0);
- Clobber(rs_rT1);
- Clobber(rs_rT2);
- Clobber(rs_rT3);
- Clobber(rs_rT4);
- Clobber(rs_rT5);
- Clobber(rs_rT6);
- Clobber(rs_rT7);
- Clobber(rs_rT8);
- Clobber(rs_rT9);
- Clobber(rs_rK0);
- Clobber(rs_rK1);
- Clobber(rs_rGP);
- Clobber(rs_rFP);
- Clobber(rs_rRA);
- Clobber(rs_rF0);
- Clobber(rs_rF1);
- Clobber(rs_rF2);
- Clobber(rs_rF3);
- Clobber(rs_rF4);
- Clobber(rs_rF5);
- Clobber(rs_rF6);
- Clobber(rs_rF7);
- Clobber(rs_rF8);
- Clobber(rs_rF9);
- Clobber(rs_rF10);
- Clobber(rs_rF11);
- Clobber(rs_rF12);
- Clobber(rs_rF13);
- Clobber(rs_rF14);
- Clobber(rs_rF15);
- if (fpuIs32Bit_) {
- Clobber(rs_rD0_fr0);
- Clobber(rs_rD1_fr0);
- Clobber(rs_rD2_fr0);
- Clobber(rs_rD3_fr0);
- Clobber(rs_rD4_fr0);
- Clobber(rs_rD5_fr0);
- Clobber(rs_rD6_fr0);
- Clobber(rs_rD7_fr0);
+ if (cu_->target64) {
+ Clobber(rs_rZEROd);
+ Clobber(rs_rATd);
+ Clobber(rs_rV0d);
+ Clobber(rs_rV1d);
+ Clobber(rs_rA0d);
+ Clobber(rs_rA1d);
+ Clobber(rs_rA2d);
+ Clobber(rs_rA3d);
+ Clobber(rs_rA4d);
+ Clobber(rs_rA5d);
+ Clobber(rs_rA6d);
+ Clobber(rs_rA7d);
+ Clobber(rs_rT0d);
+ Clobber(rs_rT1d);
+ Clobber(rs_rT2d);
+ Clobber(rs_rT3d);
+ Clobber(rs_rT8d);
+ Clobber(rs_rT9d);
+ Clobber(rs_rK0d);
+ Clobber(rs_rK1d);
+ Clobber(rs_rGPd);
+ Clobber(rs_rFPd);
+ Clobber(rs_rRAd);
+
+ Clobber(rs_rF0);
+ Clobber(rs_rF1);
+ Clobber(rs_rF2);
+ Clobber(rs_rF3);
+ Clobber(rs_rF4);
+ Clobber(rs_rF5);
+ Clobber(rs_rF6);
+ Clobber(rs_rF7);
+ Clobber(rs_rF8);
+ Clobber(rs_rF9);
+ Clobber(rs_rF10);
+ Clobber(rs_rF11);
+ Clobber(rs_rF12);
+ Clobber(rs_rF13);
+ Clobber(rs_rF14);
+ Clobber(rs_rF15);
+ Clobber(rs_rD0);
+ Clobber(rs_rD1);
+ Clobber(rs_rD2);
+ Clobber(rs_rD3);
+ Clobber(rs_rD4);
+ Clobber(rs_rD5);
+ Clobber(rs_rD6);
+ Clobber(rs_rD7);
} else {
- Clobber(rs_rD0_fr1);
- Clobber(rs_rD1_fr1);
- Clobber(rs_rD2_fr1);
- Clobber(rs_rD3_fr1);
- Clobber(rs_rD4_fr1);
- Clobber(rs_rD5_fr1);
- Clobber(rs_rD6_fr1);
- Clobber(rs_rD7_fr1);
+ Clobber(rs_rZERO);
+ Clobber(rs_rAT);
+ Clobber(rs_rV0);
+ Clobber(rs_rV1);
+ Clobber(rs_rA0);
+ Clobber(rs_rA1);
+ Clobber(rs_rA2);
+ Clobber(rs_rA3);
+ Clobber(rs_rT0_32);
+ Clobber(rs_rT1_32);
+ Clobber(rs_rT2_32);
+ Clobber(rs_rT3_32);
+ Clobber(rs_rT4_32);
+ Clobber(rs_rT5_32);
+ Clobber(rs_rT6_32);
+ Clobber(rs_rT7_32);
+ Clobber(rs_rT8);
+ Clobber(rs_rT9);
+ Clobber(rs_rK0);
+ Clobber(rs_rK1);
+ Clobber(rs_rGP);
+ Clobber(rs_rFP);
+ Clobber(rs_rRA);
+ Clobber(rs_rF0);
+ Clobber(rs_rF1);
+ Clobber(rs_rF2);
+ Clobber(rs_rF3);
+ Clobber(rs_rF4);
+ Clobber(rs_rF5);
+ Clobber(rs_rF6);
+ Clobber(rs_rF7);
+ Clobber(rs_rF8);
+ Clobber(rs_rF9);
+ Clobber(rs_rF10);
+ Clobber(rs_rF11);
+ Clobber(rs_rF12);
+ Clobber(rs_rF13);
+ Clobber(rs_rF14);
+ Clobber(rs_rF15);
+ if (fpuIs32Bit_) {
+ Clobber(rs_rD0_fr0);
+ Clobber(rs_rD1_fr0);
+ Clobber(rs_rD2_fr0);
+ Clobber(rs_rD3_fr0);
+ Clobber(rs_rD4_fr0);
+ Clobber(rs_rD5_fr0);
+ Clobber(rs_rD6_fr0);
+ Clobber(rs_rD7_fr0);
+ } else {
+ Clobber(rs_rD0_fr1);
+ Clobber(rs_rD1_fr1);
+ Clobber(rs_rD2_fr1);
+ Clobber(rs_rD3_fr1);
+ Clobber(rs_rD4_fr1);
+ Clobber(rs_rD5_fr1);
+ Clobber(rs_rD6_fr1);
+ Clobber(rs_rD7_fr1);
+ }
}
}
@@ -463,18 +642,30 @@ RegLocation MipsMir2Lir::GetReturnAlt() {
/* To be used when explicitly managing register use */
void MipsMir2Lir::LockCallTemps() {
- LockTemp(rs_rMIPS_ARG0);
- LockTemp(rs_rMIPS_ARG1);
- LockTemp(rs_rMIPS_ARG2);
- LockTemp(rs_rMIPS_ARG3);
+ LockTemp(TargetReg(kArg0));
+ LockTemp(TargetReg(kArg1));
+ LockTemp(TargetReg(kArg2));
+ LockTemp(TargetReg(kArg3));
+ if (cu_->target64) {
+ LockTemp(TargetReg(kArg4));
+ LockTemp(TargetReg(kArg5));
+ LockTemp(TargetReg(kArg6));
+ LockTemp(TargetReg(kArg7));
+ }
}
/* To be used when explicitly managing register use */
void MipsMir2Lir::FreeCallTemps() {
- FreeTemp(rs_rMIPS_ARG0);
- FreeTemp(rs_rMIPS_ARG1);
- FreeTemp(rs_rMIPS_ARG2);
- FreeTemp(rs_rMIPS_ARG3);
+ FreeTemp(TargetReg(kArg0));
+ FreeTemp(TargetReg(kArg1));
+ FreeTemp(TargetReg(kArg2));
+ FreeTemp(TargetReg(kArg3));
+ if (cu_->target64) {
+ FreeTemp(TargetReg(kArg4));
+ FreeTemp(TargetReg(kArg5));
+ FreeTemp(TargetReg(kArg6));
+ FreeTemp(TargetReg(kArg7));
+ }
FreeTemp(TargetReg(kHiddenArg));
}
@@ -488,31 +679,63 @@ bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
}
void MipsMir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs, empty_pool /* core64 */,
- sp_regs,
- fpuIs32Bit_ ? dp_fr0_regs : dp_fr1_regs,
- reserved_regs, empty_pool /* reserved64 */,
- core_temps, empty_pool /* core64_temps */,
- sp_temps,
- fpuIs32Bit_ ? dp_fr0_temps : dp_fr1_temps));
-
- // Target-specific adjustments.
-
- // Alias single precision floats to appropriate half of overlapping double.
- for (RegisterInfo* info : reg_pool_->sp_regs_) {
- int sp_reg_num = info->GetReg().GetRegNum();
- int dp_reg_num = sp_reg_num & ~1;
- RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
- RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
- // Double precision register's master storage should refer to itself.
- DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
- // Redirect single precision's master storage to master.
- info->SetMaster(dp_reg_info);
- // Singles should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- if (sp_reg_num & 1) {
- // For odd singles, change to user the high word of the backing double.
- info->SetStorageMask(0x2);
+ if (cu_->target64) {
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64d, sp_regs_64,
+ dp_regs_64, reserved_regs_64, reserved_regs_64d,
+ core_temps_64, core_temps_64d, sp_temps_64,
+ dp_temps_64));
+
+ // Alias single precision floats to appropriate half of overlapping double.
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
+ int sp_reg_num = info->GetReg().GetRegNum();
+ int dp_reg_num = sp_reg_num;
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+
+    // Alias 32-bit core registers to the corresponding 64-bit core registers.
+ for (RegisterInfo* info : reg_pool_->core_regs_) {
+ int d_reg_num = info->GetReg().GetRegNum();
+ RegStorage d_reg = RegStorage::Solo64(d_reg_num);
+ RegisterInfo* d_reg_info = GetRegInfo(d_reg);
+ // 64bit D register's master storage should refer to itself.
+ DCHECK_EQ(d_reg_info, d_reg_info->Master());
+ // Redirect 32bit master storage to 64bit D.
+ info->SetMaster(d_reg_info);
+ // 32bit should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ }
+ } else {
+ reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs_32, empty_pool, // core64
+ sp_regs_32,
+ fpuIs32Bit_ ? dp_fr0_regs_32 : dp_fr1_regs_32,
+ reserved_regs_32, empty_pool, // reserved64
+ core_temps_32, empty_pool, // core64_temps
+ sp_temps_32,
+ fpuIs32Bit_ ? dp_fr0_temps_32 : dp_fr1_temps_32));
+
+ // Alias single precision floats to appropriate half of overlapping double.
+ for (RegisterInfo* info : reg_pool_->sp_regs_) {
+ int sp_reg_num = info->GetReg().GetRegNum();
+ int dp_reg_num = sp_reg_num & ~1;
+ RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
+ RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
+ // Double precision register's master storage should refer to itself.
+ DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
+ // Redirect single precision's master storage to master.
+ info->SetMaster(dp_reg_info);
+ // Singles should show a single 32-bit mask bit, at first referring to the low half.
+ DCHECK_EQ(info->StorageMask(), 0x1U);
+ if (sp_reg_num & 1) {
+        // For odd singles, change to use the high word of the backing double.
+ info->SetStorageMask(0x2);
+ }
}
}
@@ -520,7 +743,11 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
// TODO: adjust when we roll to hard float calling convention.
reg_pool_->next_core_reg_ = 2;
reg_pool_->next_sp_reg_ = 2;
- reg_pool_->next_dp_reg_ = 2;
+ if (cu_->target64) {
+ reg_pool_->next_dp_reg_ = 1;
+ } else {
+ reg_pool_->next_dp_reg_ = 2;
+ }
}
/*
@@ -531,14 +758,24 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
*/
RegStorage MipsMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
// NOTE: native pointer.
- LoadWordDisp(rs_rMIPS_SELF, GetThreadOffset<4>(trampoline).Int32Value(), rs_rT9);
- return rs_rT9;
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<8>(trampoline).Int32Value(),
+ TargetPtrReg(kInvokeTgt));
+ } else {
+ LoadWordDisp(TargetPtrReg(kSelf), GetThreadOffset<4>(trampoline).Int32Value(),
+ TargetPtrReg(kInvokeTgt));
+ }
+ return TargetPtrReg(kInvokeTgt);
}
LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
RegStorage tmp = AllocTemp();
// NOTE: native pointer.
- LoadWordDisp(rs_rMIPS_SELF, Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
+ if (cu_->target64) {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
+ } else {
+ LoadWordDisp(TargetPtrReg(kSelf), Thread::ThreadSuspendTriggerOffset<4>().Int32Value(), tmp);
+ }
LIR *inst = LoadWordDisp(tmp, 0, tmp);
FreeTemp(tmp);
return inst;
@@ -546,31 +783,47 @@ LIR* MipsMir2Lir::CheckSuspendUsingLoad() {
LIR* MipsMir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
- DCHECK(r_dest.IsPair());
+ if (!cu_->target64) {
+ DCHECK(r_dest.IsPair());
+ }
ClobberCallerSave();
- LockCallTemps(); // Using fixed registers
+ LockCallTemps(); // Using fixed registers.
RegStorage reg_ptr = TargetReg(kArg0);
OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
RegStorage r_tgt = LoadHelper(kQuickA64Load);
LIR *ret = OpReg(kOpBlx, r_tgt);
- RegStorage reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
- OpRegCopyWide(r_dest, reg_ret);
+ RegStorage reg_ret;
+ if (cu_->target64) {
+ OpRegCopy(r_dest, TargetReg(kRet0));
+ } else {
+ reg_ret = RegStorage::MakeRegPair(TargetReg(kRet0), TargetReg(kRet1));
+ OpRegCopyWide(r_dest, reg_ret);
+ }
return ret;
}
LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
- DCHECK(r_src.IsPair());
+ if (cu_->target64) {
+ DCHECK(!r_src.IsPair());
+ } else {
+ DCHECK(r_src.IsPair());
+ }
ClobberCallerSave();
- LockCallTemps(); // Using fixed registers
+ LockCallTemps(); // Using fixed registers.
RegStorage temp_ptr = AllocTemp();
OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
RegStorage temp_value = AllocTempWide();
OpRegCopyWide(temp_value, r_src);
- RegStorage reg_ptr = TargetReg(kArg0);
- OpRegCopy(reg_ptr, temp_ptr);
- RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
- OpRegCopyWide(reg_value, temp_value);
+ if (cu_->target64) {
+ OpRegCopyWide(TargetReg(kArg0, kWide), temp_ptr);
+ OpRegCopyWide(TargetReg(kArg1, kWide), temp_value);
+ } else {
+ RegStorage reg_ptr = TargetReg(kArg0);
+ OpRegCopy(reg_ptr, temp_ptr);
+ RegStorage reg_value = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
+ OpRegCopyWide(reg_value, temp_value);
+ }
FreeTemp(temp_ptr);
FreeTemp(temp_value);
RegStorage r_tgt = LoadHelper(kQuickA64Store);
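The two marshaling paths above reflect the calling conventions: 32-bit MIPS splits a 64-bit value across the kArg2/kArg3 register pair, while MIPS64 passes it whole in one wide register. A small illustrative sketch of the packing rule (names are placeholders, not compiler API):

#include <cstdint>

// 32-bit convention: a 64-bit argument occupies two 32-bit argument registers.
struct Args32 {
  uint32_t lo;  // goes in the low register of the pair
  uint32_t hi;  // goes in the high register of the pair
};

Args32 Pack64For32BitCall(uint64_t v) {
  return Args32{static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
}

// 64-bit convention: the argument travels unsplit in a single wide register.
uint64_t Pack64For64BitCall(uint64_t v) {
  return v;
}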
@@ -582,12 +835,15 @@ void MipsMir2Lir::SpillCoreRegs() {
return;
}
uint32_t mask = core_spill_mask_;
- int offset = num_core_spills_ * 4;
- OpRegImm(kOpSub, rs_rSP, offset);
+ int ptr_size = cu_->target64 ? 8 : 4;
+ int offset = num_core_spills_ * ptr_size;
+ const RegStorage rs_sp = TargetPtrReg(kSp);
+ OpRegImm(kOpSub, rs_sp, offset);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- offset -= 4;
- Store32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+ offset -= ptr_size;
+ StoreWordDisp(rs_sp, offset,
+ cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
}
}
}
@@ -597,14 +853,17 @@ void MipsMir2Lir::UnSpillCoreRegs() {
return;
}
uint32_t mask = core_spill_mask_;
- int offset = frame_size_;
+ int offset = frame_size_;
+ int ptr_size = cu_->target64 ? 8 : 4;
+ const RegStorage rs_sp = TargetPtrReg(kSp);
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- offset -= 4;
- Load32Disp(rs_rMIPS_SP, offset, RegStorage::Solo32(reg));
+ offset -= ptr_size;
+ LoadWordDisp(rs_sp, offset,
+ cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg));
}
}
- OpRegImm(kOpAdd, rs_rSP, frame_size_);
+ OpRegImm(kOpAdd, rs_sp, frame_size_);
}
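SpillCoreRegs and UnSpillCoreRegs walk core_spill_mask_ identically: each set bit claims one pointer-sized slot working down from the top of the spill area, so the only 32/64-bit difference is the slot stride. A minimal sketch of the walk, assuming the same mask convention:

#include <cstdint>
#include <vector>

std::vector<int> SpillSlotOffsets(uint32_t mask, int frame_size, bool is64bit) {
  const int ptr_size = is64bit ? 8 : 4;
  std::vector<int> offsets;  // one slot per set bit, lowest-numbered reg first
  int offset = frame_size;
  for (; mask != 0; mask >>= 1) {
    if ((mask & 0x1) != 0) {
      offset -= ptr_size;
      offsets.push_back(offset);
    }
  }
  return offsets;
}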
bool MipsMir2Lir::IsUnconditionalBranch(LIR* lir) {
@@ -624,11 +883,12 @@ RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volati
}
MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
- : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips_mapper_(this),
- isaIsR6_(cu->compiler_driver->GetInstructionSetFeatures()
- ->AsMipsInstructionSetFeatures()->IsR6()),
- fpuIs32Bit_(cu->compiler_driver->GetInstructionSetFeatures()
- ->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
+ : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this),
+ in_to_reg_storage_mips_mapper_(this),
+ isaIsR6_(cu_->target64 ? true : cu->compiler_driver->GetInstructionSetFeatures()
+ ->AsMipsInstructionSetFeatures()->IsR6()),
+ fpuIs32Bit_(cu_->target64 ? false : cu->compiler_driver->GetInstructionSetFeatures()
+ ->AsMipsInstructionSetFeatures()->Is32BitFloatingPoint()) {
for (int i = 0; i < kMipsLast; i++) {
DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
<< "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 3b7e0ed23b..bf0e0fc78b 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -26,30 +26,70 @@
namespace art {
-/* This file contains codegen for the MIPS32 ISA. */
+/* This file contains codegen for the MIPS32 and MIPS64 ISAs. */
LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
int opcode;
- /* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
- if (r_dest.IsDouble()) {
- opcode = kMipsFmovd;
- } else {
- if (r_dest.IsSingle()) {
- if (r_src.IsSingle()) {
- opcode = kMipsFmovs;
+ if (cu_->target64) {
+ DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
+ if (r_dest.Is64Bit()) {
+ if (r_dest.IsDouble()) {
+ if (r_src.IsDouble()) {
+ opcode = kMipsFmovd;
+ } else {
+ // Note the operands are swapped for the dmtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMips64Dmtc1;
+ }
} else {
- /* note the operands are swapped for the mtc1 instr */
- RegStorage t_opnd = r_src;
- r_src = r_dest;
- r_dest = t_opnd;
- opcode = kMipsMtc1;
+ DCHECK(r_src.IsDouble());
+ opcode = kMips64Dmfc1;
}
} else {
- DCHECK(r_src.IsSingle());
- opcode = kMipsMfc1;
+ if (r_dest.IsSingle()) {
+ if (r_src.IsSingle()) {
+ opcode = kMipsFmovs;
+ } else {
+ // Note the operands are swapped for the mtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMipsMtc1;
+ }
+ } else {
+ DCHECK(r_src.IsSingle());
+ opcode = kMipsMfc1;
+ }
}
+ } else {
+ // Must be both DOUBLE or both not DOUBLE.
+ DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
+ if (r_dest.IsDouble()) {
+ opcode = kMipsFmovd;
+ } else {
+ if (r_dest.IsSingle()) {
+ if (r_src.IsSingle()) {
+ opcode = kMipsFmovs;
+ } else {
+ // Note the operands are swapped for the mtc1 instr.
+ RegStorage t_opnd = r_src;
+ r_src = r_dest;
+ r_dest = t_opnd;
+ opcode = kMipsMtc1;
+ }
+ } else {
+ DCHECK(r_src.IsSingle());
+ opcode = kMipsMfc1;
+ }
+ }
+ }
+ LIR* res;
+ if (cu_->target64) {
+ res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
+ } else {
+ res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
}
- LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
res->flags.is_nop = true;
}
@@ -95,7 +135,7 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
r_dest = AllocTemp();
}
- /* See if the value can be constructed cheaply */
+ // See if the value can be constructed cheaply.
if (value == 0) {
res = NewLIR2(kMipsMove, r_dest.GetReg(), rZERO);
} else if (IsUint<16>(value)) {
@@ -118,6 +158,117 @@ LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
return res;
}
+LIR* MipsMir2Lir::LoadConstantWideNoClobber(RegStorage r_dest, int64_t value) {
+ LIR* res = nullptr;
+ DCHECK(r_dest.Is64Bit());
+ RegStorage r_dest_save = r_dest;
+ int is_fp_reg = r_dest.IsFloat();
+ if (is_fp_reg) {
+ DCHECK(r_dest.IsDouble());
+ r_dest = AllocTemp();
+ }
+
+ int bit31 = (value & UINT64_C(0x80000000)) != 0;
+
+ // Loads with 1 instruction.
+ if (IsUint<16>(value)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+ } else if (IsInt<16>(value)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
+ } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ } else if (IsInt<32>(value)) {
+ // Loads with 2 instructions.
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
+ } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, value);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
+ } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
+ (value >> 32) <= (32767 - bit31)) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
+ } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), value >> 16);
+ NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
+ } else {
+ int64_t tmp = value;
+ int shift_cnt = 0;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else if (IsInt<32>(tmp)) {
+ // Loads with 3 instructions.
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp >> 16);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ } else {
+ tmp = value >> 16;
+ shift_cnt = 16;
+ while ((tmp & 1) == 0) {
+ tmp >>= 1;
+ shift_cnt++;
+ }
+
+ if (IsUint<16>(tmp)) {
+ res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else if (IsInt<16>(tmp)) {
+ res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
+ NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
+ shift_cnt & 0x1F);
+ NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), value);
+ } else {
+ // Loads with 3-4 instructions.
+ uint64_t tmp2 = value;
+ if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
+ res = NewLIR2(kMipsLui, r_dest.GetReg(), tmp2 >> 16);
+ }
+ if ((tmp2 & 0xFFFF) != 0) {
+            if (res != nullptr) {
+              NewLIR3(kMipsOri, r_dest.GetReg(), r_dest.GetReg(), tmp2);
+            } else {
+              res = NewLIR3(kMipsOri, r_dest.GetReg(), rZEROd, tmp2);
+            }
+ }
+ if (bit31) {
+ tmp2 += UINT64_C(0x100000000);
+ }
+ if (((tmp2 >> 32) & 0xFFFF) != 0) {
+ NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
+ }
+ if (tmp2 & UINT64_C(0x800000000000)) {
+ tmp2 += UINT64_C(0x1000000000000);
+ }
+ if ((tmp2 >> 48) != 0) {
+ NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
+ }
+ }
+ }
+ }
+
+ if (is_fp_reg) {
+ NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
+ FreeTemp(r_dest);
+ }
+ return res;
+}
+
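To make the tiering in LoadConstantWideNoClobber concrete, a few hand-traced sequences (rd is the destination register; each follows the first matching branch above):

// 0x0000000000001234 : ori    rd, $zero, 0x1234              (IsUint<16>, 1 instr)
// 0xFFFFFFFFFFFF8000 : daddiu rd, $zero, -32768              (IsInt<16>, 1 instr)
// 0x0000000012345678 : lui rd, 0x1234 ; ori rd, rd, 0x5678   (IsInt<32>, 2 instrs)
// 0x0000123400000000 : ori rd, $zero, 0 ; dahi rd, 0x1234    (bits 16-31 clear, 2 instrs)
#include <cstdint>
static_assert((UINT64_C(0x1234) << 16 | 0x5678) == UINT64_C(0x12345678), "lui+ori compose");
static_assert((UINT64_C(0x1234) << 32) == UINT64_C(0x123400000000), "dahi adds imm << 32");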
LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
res->target = target;
@@ -136,57 +287,33 @@ LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
default:
LOG(FATAL) << "Bad case in OpReg";
}
- return NewLIR2(opcode, rRA, r_dest_src.GetReg());
+ return NewLIR2(opcode, cu_->target64 ? rRAd : rRA, r_dest_src.GetReg());
}
LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
- LIR *res;
- bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
- bool short_form = (abs_value & 0xff) == abs_value;
- MipsOpCode opcode = kMipsNop;
- switch (op) {
- case kOpAdd:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- break;
- case kOpSub:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- break;
- default:
- LOG(FATAL) << "Bad case in OpRegImm";
- break;
- }
- if (short_form) {
- res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
+ if ((op == kOpAdd) || (op == kOpSub)) {
+ return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
} else {
- RegStorage r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
- if (op == kOpCmp)
- NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
- else
- NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
+ LOG(FATAL) << "Bad case in OpRegImm";
}
- return res;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
MipsOpCode opcode = kMipsNop;
+ bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit());
switch (op) {
case kOpAdd:
- opcode = kMipsAddu;
+ opcode = is64bit ? kMips64Daddu : kMipsAddu;
break;
case kOpSub:
- opcode = kMipsSubu;
+ opcode = is64bit ? kMips64Dsubu : kMipsSubu;
break;
case kOpAnd:
opcode = kMipsAnd;
break;
case kOpMul:
- if (isaIsR6_) {
- opcode = kMipsR6Mul;
- } else {
- opcode = kMipsMul;
- }
+ opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
break;
case kOpOr:
opcode = kMipsOr;
@@ -195,20 +322,20 @@ LIR* MipsMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, R
opcode = kMipsXor;
break;
case kOpLsl:
- opcode = kMipsSllv;
+ opcode = is64bit ? kMips64Dsllv : kMipsSllv;
break;
case kOpLsr:
- opcode = kMipsSrlv;
+ opcode = is64bit ? kMips64Dsrlv : kMipsSrlv;
break;
case kOpAsr:
- opcode = kMipsSrav;
+ opcode = is64bit ? kMips64Dsrav : kMipsSrav;
break;
case kOpAdc:
case kOpSbc:
LOG(FATAL) << "No carry bit on MIPS";
break;
default:
- LOG(FATAL) << "bad case in OpRegRegReg";
+ LOG(FATAL) << "Bad case in OpRegRegReg";
break;
}
return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
@@ -218,36 +345,67 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, i
LIR *res;
MipsOpCode opcode = kMipsNop;
bool short_form = true;
+ bool is64bit = cu_->target64 && (r_dest.Is64Bit() || r_src1.Is64Bit());
switch (op) {
case kOpAdd:
if (IS_SIMM16(value)) {
- opcode = kMipsAddiu;
+ opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
} else {
short_form = false;
- opcode = kMipsAddu;
+ opcode = is64bit ? kMips64Daddu : kMipsAddu;
}
break;
case kOpSub:
if (IS_SIMM16((-value))) {
value = -value;
- opcode = kMipsAddiu;
+ opcode = is64bit ? kMips64Daddiu : kMipsAddiu;
} else {
short_form = false;
- opcode = kMipsSubu;
+ opcode = is64bit ? kMips64Dsubu : kMipsSubu;
}
break;
case kOpLsl:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSll;
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsll;
+ } else {
+ opcode = kMips64Dsll32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSll;
+ }
break;
case kOpLsr:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSrl;
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsrl;
+ } else {
+ opcode = kMips64Dsrl32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSrl;
+ }
break;
case kOpAsr:
- DCHECK(value >= 0 && value <= 31);
- opcode = kMipsSra;
+ if (is64bit) {
+ DCHECK(value >= 0 && value <= 63);
+ if (value >= 0 && value <= 31) {
+ opcode = kMips64Dsra;
+ } else {
+ opcode = kMips64Dsra32;
+ value = value - 32;
+ }
+ } else {
+ DCHECK(value >= 0 && value <= 31);
+ opcode = kMipsSra;
+ }
break;
case kOpAnd:
if (IS_UIMM16((value))) {
@@ -275,11 +433,7 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, i
break;
case kOpMul:
short_form = false;
- if (isaIsR6_) {
- opcode = kMipsR6Mul;
- } else {
- opcode = kMipsMul;
- }
+ opcode = isaIsR6_ ? kMipsR6Mul : kMipsR2Mul;
break;
default:
LOG(FATAL) << "Bad case in OpRegRegImm";
@@ -293,8 +447,14 @@ LIR* MipsMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, i
res = LoadConstant(r_dest, value);
NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
} else {
- RegStorage r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
+ RegStorage r_scratch;
+ if (is64bit) {
+ r_scratch = AllocTempWide();
+ res = LoadConstantWide(r_scratch, value);
+ } else {
+ r_scratch = AllocTemp();
+ res = LoadConstant(r_scratch, value);
+ }
NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
}
}
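The 64-bit shift cases above work around an encoding limit: the shift-amount field is only 5 bits, so amounts 32-63 must use the *32 opcode variants (dsll32, dsrl32, dsra32), which add 32 to the encoded amount. The selection rule, restated as a sketch (enum values stand in for the LIR opcodes):

#include <cassert>

enum ShiftOpcode { kShift, kShift32 };  // stand-ins for kMips64Dsll/kMips64Dsll32 etc.

ShiftOpcode SelectShiftOpcode(int& value) {
  assert(value >= 0 && value <= 63);
  if (value <= 31) {
    return kShift;    // amount fits the 5-bit field directly
  }
  value -= 32;        // the *32 form implicitly adds 32 back
  return kShift32;
}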
@@ -311,7 +471,11 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
case kOpMvn:
return NewLIR3(kMipsNor, r_dest_src1.GetReg(), r_src2.GetReg(), rZERO);
case kOpNeg:
- return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+ if (cu_->target64 && r_dest_src1.Is64Bit()) {
+ return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
+ } else {
+ return NewLIR3(kMipsSubu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
+ }
case kOpAdd:
case kOpAnd:
case kOpMul:
@@ -320,21 +484,29 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
case kOpXor:
return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
case kOp2Byte:
- if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
- ->IsMipsIsaRevGreaterThanEqual2()) {
+ if (cu_->target64) {
res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
} else {
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+ if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeb, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 24);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 24);
+ }
}
return res;
case kOp2Short:
- if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
- ->IsMipsIsaRevGreaterThanEqual2()) {
+ if (cu_->target64) {
res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
} else {
- res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
- OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+ if (cu_->compiler_driver->GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()
+ ->IsMipsIsaRevGreaterThanEqual2()) {
+ res = NewLIR2(kMipsSeh, r_dest_src1.GetReg(), r_src2.GetReg());
+ } else {
+ res = OpRegRegImm(kOpLsl, r_dest_src1, r_src2, 16);
+ OpRegRegImm(kOpAsr, r_dest_src1, r_dest_src1, 16);
+ }
}
return res;
case kOp2Char:
@@ -367,10 +539,14 @@ LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, R
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
LIR *res;
+ if (cu_->target64) {
+ res = LoadConstantWideNoClobber(r_dest, value);
+ return res;
+ }
if (fpuIs32Bit_ || !r_dest.IsFloat()) {
// 32bit FPU (pairs) or loading into GPR.
if (!r_dest.IsPair()) {
- // Form 64-bit pair
+ // Form 64-bit pair.
r_dest = Solo64ToPair64(r_dest);
}
res = LoadConstantNoClobber(r_dest.GetLow(), Low32Bits(value));
@@ -393,7 +569,8 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
LIR *first = NULL;
LIR *res;
MipsOpCode opcode = kMipsNop;
- RegStorage t_reg = AllocTemp();
+ bool is64bit = cu_->target64 && r_dest.Is64Bit();
+ RegStorage t_reg = is64bit ? AllocTempWide() : AllocTemp();
if (r_dest.IsFloat()) {
DCHECK(r_dest.IsSingle());
@@ -404,14 +581,34 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
size = k32;
}
- if (!scale) {
- first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+  if (cu_->target64) {
+    if (!scale) {
+      if (is64bit) {
+        first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+      } else {
+        first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+      }
+    } else {
+      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+      NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
+    }
} else {
- first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+    if (!scale) {
+      first = NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
+    } else {
+      first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
+      NewLIR3(kMipsAddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
+    }
}
switch (size) {
+ case k64:
+ if (cu_->target64) {
+ opcode = kMips64Ld;
+ } else {
+ LOG(FATAL) << "Bad case in LoadBaseIndexed";
+ }
+ break;
case kSingle:
opcode = kMipsFlwc1;
break;
@@ -440,7 +637,7 @@ LIR* MipsMir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor
return (first) ? first : res;
}
-/* store value base base + scaled index. */
+// Store value at base + scaled index.
LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
LIR *first = NULL;
@@ -456,11 +653,12 @@ LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
size = k32;
}
+ MipsOpCode add_opcode = cu_->target64 ? kMips64Daddu : kMipsAddu;
if (!scale) {
- first = NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), r_index.GetReg());
+    first = NewLIR3(add_opcode, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
} else {
first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMipsAddu, t_reg.GetReg() , r_base.GetReg(), t_reg.GetReg());
+    NewLIR3(add_opcode, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
}
switch (size) {
@@ -507,9 +705,19 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
switch (size) {
case k64:
case kDouble:
+ if (cu_->target64) {
+ r_dest = Check64BitReg(r_dest);
+ if (!r_dest.IsFloat()) {
+ opcode = kMips64Ld;
+ } else {
+ opcode = kMipsFldc1;
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ }
is64bit = true;
if (fpuIs32Bit_ && !r_dest.IsPair()) {
- // Form 64-bit pair
+ // Form 64-bit pair.
r_dest = Solo64ToPair64(r_dest);
}
short_form = IS_SIMM16_2WORD(displacement);
@@ -546,20 +754,40 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
       LOG(FATAL) << "Bad case in LoadBaseDispBody";
}
+ if (cu_->target64) {
+ if (short_form) {
+ load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
+ } else {
+ RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
+ res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
+ load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
+      if (r_tmp != r_dest) {
+        FreeTemp(r_tmp);
+      }
+ }
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
+ AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
+ }
+ return res;
+ }
+
if (short_form) {
if (!is64bit) {
load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
} else {
if (fpuIs32Bit_ || !r_dest.IsFloat()) {
DCHECK(r_dest.IsPair());
- load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+ load = res = NewLIR3(opcode, r_dest.GetLowReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
load2 = NewLIR3(opcode, r_dest.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
} else {
// Here if 64bit fpu and r_dest is a 64bit fp register.
RegStorage r_tmp = AllocTemp();
// FIXME: why is r_dest a 64BitPair here???
r_dest = Fp64ToSolo32(r_dest);
- load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+ load = res = NewLIR3(kMipsFlwc1, r_dest.GetReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
load2 = NewLIR3(kMipsLw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
NewLIR2(kMipsMthc1, r_tmp.GetReg(), r_dest.GetReg());
FreeTemp(r_tmp);
@@ -591,7 +819,7 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS_SP);
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
AnnotateDalvikRegAccess(load, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, is64bit /* is64bit */);
if (is64bit) {
@@ -599,19 +827,21 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
true /* is_load */, is64bit /* is64bit */);
}
}
- return load;
+ return res;
}
-LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
+ VolatileKind is_volatile) {
+  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble)) &&
+      (!cu_->target64 || (displacement & 0x7))) {
+ // TODO: use lld/scd instructions for Mips64.
// Do atomic 64-bit load.
return GenAtomic64Load(r_base, displacement, r_dest);
}
// TODO: base this on target.
if (size == kWord) {
- size = k32;
+ size = cu_->target64 ? k64 : k32;
}
LIR* load;
load = LoadBaseDispBody(r_base, displacement, r_dest, size);
@@ -624,8 +854,8 @@ LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r
}
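The displacement test added above narrows when the helper is required: an aligned doubleword ld/sd on MIPS64 is a single atomic access, so only 32-bit targets and 8-byte-misaligned slots fall back to the Quick A64 entrypoints. The predicate, restated as a sketch:

bool NeedsAtomic64Helper(bool is_volatile_wide, bool target64, int displacement) {
  if (!is_volatile_wide) {
    return false;  // non-volatile or narrow accesses never take the helper
  }
  // 32-bit targets always call out; 64-bit targets only for unaligned slots.
  return !target64 || (displacement & 0x7) != 0;
}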
// FIXME: don't split r_dest into 2 containers.
-LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
- RegStorage r_src, OpSize size) {
+LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
+ OpSize size) {
LIR *res;
LIR *store = NULL;
LIR *store2 = NULL;
@@ -636,9 +866,19 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
switch (size) {
case k64:
case kDouble:
+ if (cu_->target64) {
+ r_src = Check64BitReg(r_src);
+ if (!r_src.IsFloat()) {
+ opcode = kMips64Sd;
+ } else {
+ opcode = kMipsFsdc1;
+ }
+ DCHECK_EQ((displacement & 0x3), 0);
+ break;
+ }
is64bit = true;
if (fpuIs32Bit_ && !r_src.IsPair()) {
- // Form 64-bit pair
+ // Form 64-bit pair.
r_src = Solo64ToPair64(r_src);
}
short_form = IS_SIMM16_2WORD(displacement);
@@ -670,19 +910,38 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
LOG(FATAL) << "Bad case in StoreBaseDispBody";
}
+ if (cu_->target64) {
+ if (short_form) {
+ store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
+ } else {
+ RegStorage r_scratch = AllocTemp();
+ res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
+ store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
+ FreeTemp(r_scratch);
+ }
+
+ if (mem_ref_type_ == ResourceMask::kDalvikReg) {
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
+ AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
+ }
+ return res;
+ }
+
if (short_form) {
if (!is64bit) {
store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
} else {
if (fpuIs32Bit_ || !r_src.IsFloat()) {
DCHECK(r_src.IsPair());
- store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+ store = res = NewLIR3(opcode, r_src.GetLowReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
store2 = NewLIR3(opcode, r_src.GetHighReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
} else {
// Here if 64bit fpu and r_src is a 64bit fp register
RegStorage r_tmp = AllocTemp();
r_src = Fp64ToSolo32(r_src);
- store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET, r_base.GetReg());
+ store = res = NewLIR3(kMipsFswc1, r_src.GetReg(), displacement + LOWORD_OFFSET,
+ r_base.GetReg());
NewLIR2(kMipsMfhc1, r_tmp.GetReg(), r_src.GetReg());
store2 = NewLIR3(kMipsSw, r_tmp.GetReg(), displacement + HIWORD_OFFSET, r_base.GetReg());
FreeTemp(r_tmp);
@@ -712,7 +971,7 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS_SP);
+ DCHECK_EQ(r_base, TargetPtrReg(kSp));
AnnotateDalvikRegAccess(store, (displacement + (is64bit ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, is64bit /* is64bit */);
if (is64bit) {
@@ -724,21 +983,23 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
return res;
}
-LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) {
+LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
+ VolatileKind is_volatile) {
if (is_volatile == kVolatile) {
// Ensure that prior accesses become visible to other threads first.
GenMemBarrier(kAnyStore);
}
LIR* store;
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble))) {
+ if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
+ (!cu_->target64 || displacement & 0x7))) {
+ // TODO: use lld/scd instructions for Mips64.
     // Do atomic 64-bit store.
store = GenAtomic64Store(r_base, displacement, r_src);
} else {
// TODO: base this on target.
if (size == kWord) {
- size = k32;
+ size = cu_->target64 ? k64 : k32;
}
store = StoreBaseDispBody(r_base, displacement, r_src, size);
}
@@ -765,7 +1026,7 @@ LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- if (IsDirectEntrypoint(trampoline)) {
+ if (!cu_->target64 && IsDirectEntrypoint(trampoline)) {
// Reserve argument space on stack (for $a0-$a3) for
// entrypoints that directly reference native implementations.
// This is not safe in general, as it violates the frame size
@@ -780,4 +1041,8 @@ LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointE
return OpReg(op, r_tgt);
}
+RegStorage MipsMir2Lir::AllocPtrSizeTemp(bool required) {
+ return cu_->target64 ? AllocTempWide(required) : AllocTemp(required);
+}
+
} // namespace art
diff --git a/compiler/dex/quick/mips64/assemble_mips64.cc b/compiler/dex/quick/mips64/assemble_mips64.cc
deleted file mode 100644
index d96561bba4..0000000000
--- a/compiler/dex/quick/mips64/assemble_mips64.cc
+++ /dev/null
@@ -1,898 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-#define MAX_ASSEMBLER_RETRIES 50
-
-/*
- * opcode: Mips64OpCode enum
- * skeleton: pre-designated bit-pattern for this opcode
- * k0: key to applying ds/de
- * ds: dest start bit position
- * de: dest end bit position
- * k1: key to applying s1s/s1e
- * s1s: src1 start bit position
- * s1e: src1 end bit position
- * k2: key to applying s2s/s2e
- * s2s: src2 start bit position
- * s2e: src2 end bit position
- * operands: number of operands (for sanity check purposes)
- * name: mnemonic name
- * fmt: for pretty-printing
- */
-#define ENCODING_MAP(opcode, skeleton, k0, ds, de, k1, s1s, s1e, k2, s2s, s2e, \
- k3, k3s, k3e, flags, name, fmt, size) \
- {skeleton, {{k0, ds, de}, {k1, s1s, s1e}, {k2, s2s, s2e}, \
- {k3, k3s, k3e}}, opcode, flags, name, fmt, size}
-
-/* Instruction dump string format keys: !pf, where "!" is the start
- * of the key, "p" is which numeric operand to use and "f" is the
- * print format.
- *
- * [p]ositions:
- * 0 -> operands[0] (dest)
- * 1 -> operands[1] (src1)
- * 2 -> operands[2] (src2)
- * 3 -> operands[3] (extra)
- *
- * [f]ormats:
- * h -> 4-digit hex
- * d -> decimal
- * E -> decimal*4
- * F -> decimal*2
- * c -> branch condition (beq, bne, etc.)
- * t -> pc-relative target
- * T -> pc-region target
- * u -> 1st half of bl[x] target
- * v -> 2nd half of bl[x] target
- * R -> register list
- * s -> single precision floating point register
- * S -> double precision floating point register
- * m -> Thumb2 modified immediate
- * n -> complemented Thumb2 modified immediate
- * M -> Thumb2 16-bit zero-extended immediate
- * b -> 4-digit binary
- * N -> append a NOP
- *
- * [!] escape. To insert "!", use "!!"
- */
-/* NOTE: must be kept in sync with enum Mips64Opcode from mips64_lir.h */
-/*
- * TUNING: We're currently punting on the branch delay slots. All branch
- * instructions in this map are given a size of 8, which during assembly
- * is expanded to include a nop. This scheme should be replaced with
- * an assembler pass to fill those slots when possible.
- */
-const Mips64EncodingMap Mips64Mir2Lir::EncodingMap[kMips64Last] = {
- ENCODING_MAP(kMips6432BitData, 0x00000000,
- kFmtBitBlt, 31, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
- "data", "0x!0h(!0d)", 4),
- ENCODING_MAP(kMips64Addiu, 0x24000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "addiu", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Addu, 0x00000021,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "addu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64And, 0x00000024,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "and", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Andi, 0x30000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "andi", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64B, 0x10000000,
- kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP,
- "b", "!0t!0N", 8),
- ENCODING_MAP(kMips64Bal, 0x04110000,
- kFmtBitBlt, 15, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR |
- NEEDS_FIXUP, "bal", "!0t!0N", 8),
- ENCODING_MAP(kMips64Beq, 0x10000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
- NEEDS_FIXUP, "beq", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMips64Beqz, 0x10000000, // Same as beq above with t = $zero.
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "beqz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bgez, 0x04010000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bgez", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bgtz, 0x1c000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bgtz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Blez, 0x18000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "blez", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bltz, 0x04000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bltz", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bnez, 0x14000000, // Same as bne below with t = $zero.
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_USE0 |
- NEEDS_FIXUP, "bnez", "!0r,!1t!0N", 8),
- ENCODING_MAP(kMips64Bne, 0x14000000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_USE01 |
- NEEDS_FIXUP, "bne", "!0r,!1r,!2t!0N", 8),
- ENCODING_MAP(kMips64Break, 0x0000000d,
- kFmtBitBlt, 25, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP, "break", "!0d", 4),
- ENCODING_MAP(kMips64Daddiu, 0x64000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "daddiu", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Daddu, 0x0000002d,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "daddu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dahi, 0x04060000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
- "dahi", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Dati, 0x041E0000,
- kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE0,
- "dati", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Daui, 0x74000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "daui", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Ddiv, 0x0000009e,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "ddiv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Div, 0x0000009a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dmod, 0x000000de,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dmod", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dmul, 0x0000009c,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dmul", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dmfc1, 0x44200000,
- kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "dmfc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Dmtc1, 0x44a00000,
- kFmtBitBlt, 20, 16, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
- "dmtc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Drotr32, 0x0000003e | (1 << 21),
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "drotr32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsll, 0x00000038,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsll", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsll32, 0x0000003c,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsll32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsrl, 0x0000003a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsrl", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsrl32, 0x0000003e,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsrl32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsra, 0x0000003b,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsra", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsra32, 0x0000003f,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "dsra32", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Dsllv, 0x00000014,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsllv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dsrlv, 0x00000016,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsrlv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dsrav, 0x00000017,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsrav", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Dsubu, 0x0000002f,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "dsubu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Ext, 0x7c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
- kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
- "ext", "!0r,!1r,!2d,!3D", 4),
- ENCODING_MAP(kMips64Faddd, 0x46200000,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fadds, 0x46000000,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "add.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fdivd, 0x46200003,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fdivs, 0x46000003,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "div.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fmuld, 0x46200002,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fmuls, 0x46000002,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fsubd, 0x46200001,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtDfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.d", "!0S,!1S,!2S", 4),
- ENCODING_MAP(kMips64Fsubs, 0x46000001,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtSfp, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sub.s", "!0s,!1s,!2s", 4),
- ENCODING_MAP(kMips64Fcvtsd, 0x46200020,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.d", "!0s,!1S", 4),
- ENCODING_MAP(kMips64Fcvtsw, 0x46800020,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.s.w", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fcvtds, 0x46000021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.s", "!0S,!1s", 4),
- ENCODING_MAP(kMips64Fcvtdw, 0x46800021,
- kFmtDfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.d.w", "!0S,!1s", 4),
- ENCODING_MAP(kMips64Fcvtws, 0x46000024,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.s", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fcvtwd, 0x46200024,
- kFmtSfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "cvt.w.d", "!0s,!1S", 4),
- ENCODING_MAP(kMips64Fmovd, 0x46200006,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.d", "!0S,!1S", 4),
- ENCODING_MAP(kMips64Fmovs, 0x46000006,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mov.s", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fnegd, 0x46200007,
- kFmtDfp, 10, 6, kFmtDfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "neg.d", "!0S,!1S", 4),
- ENCODING_MAP(kMips64Fnegs, 0x46000007,
- kFmtSfp, 10, 6, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "neg.s", "!0s,!1s", 4),
- ENCODING_MAP(kMips64Fldc1, 0xd4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "ldc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Flwc1, 0xc4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lwc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Fsdc1, 0xf4000000,
- kFmtDfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sdc1", "!0S,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Fswc1, 0xe4000000,
- kFmtSfp, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "swc1", "!0s,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Jal, 0x0c000000,
- kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
- "jal", "!0T(!0E)!0N", 8),
- ENCODING_MAP(kMips64Jalr, 0x00000009,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | IS_BRANCH | REG_DEF0_USE1,
- "jalr", "!0r,!1r!0N", 8),
- ENCODING_MAP(kMips64Lahi, 0x3c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
- "lahi/lui", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Lalo, 0x34000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "lalo/ori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Lb, 0x80000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lb", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lbu, 0x90000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lbu", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Ld, 0xdc000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "ld", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lh, 0x84000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lh", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lhu, 0x94000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lhu", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lui, 0x3c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0,
- "lui", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Lw, 0x8c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lw", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Lwu, 0x9c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE2 | IS_LOAD,
- "lwu", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Mfc1, 0x44000000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "mfc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Mtc1, 0x44800000,
- kFmtBitBlt, 20, 16, kFmtSfp, 15, 11, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_USE0 | REG_DEF1,
- "mtc1", "!0r,!1s", 4),
- ENCODING_MAP(kMips64Move, 0x0000002d, // Or using zero reg.
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "move", "!0r,!1r", 4),
- ENCODING_MAP(kMips64Mod, 0x000000da,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mod", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Mul, 0x00000098,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "mul", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Nop, 0x00000000,
- kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND,
- "nop", ";", 4),
- ENCODING_MAP(kMips64Nor, 0x00000027, // Used for "not" too.
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "nor", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Or, 0x00000025,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "or", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Ori, 0x34000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "ori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Sb, 0xa0000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sb", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Sd, 0xfc000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sd", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Seb, 0x7c000420,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "seb", "!0r,!1r", 4),
- ENCODING_MAP(kMips64Seh, 0x7c000620,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
- "seh", "!0r,!1r", 4),
- ENCODING_MAP(kMips64Sh, 0xa4000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sh", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Sll, 0x00000000,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "sll", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Sllv, 0x00000004,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sllv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Slt, 0x0000002a,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "slt", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Slti, 0x28000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "slti", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Sltu, 0x0000002b,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "sltu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Sra, 0x00000003,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "sra", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Srav, 0x00000007,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "srav", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Srl, 0x00000002,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 10, 6,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "srl", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64Srlv, 0x00000006,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "srlv", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Subu, 0x00000023, // Used for "neg" too.
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "subu", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Sw, 0xac000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
- "sw", "!0r,!1d(!2r)", 4),
- ENCODING_MAP(kMips64Sync, 0x0000000f,
- kFmtBitBlt, 10, 6, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_UNARY_OP,
- "sync", ";", 4),
- ENCODING_MAP(kMips64Xor, 0x00000026,
- kFmtBitBlt, 15, 11, kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
- "xor", "!0r,!1r,!2r", 4),
- ENCODING_MAP(kMips64Xori, 0x38000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 15, 0,
- kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1,
- "xori", "!0r,!1r,0x!2h(!2d)", 4),
- ENCODING_MAP(kMips64CurrPC, 0x04110001,
- kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND | IS_BRANCH | REG_DEF_LR,
- "addiu", "ra,pc,8", 4),
- ENCODING_MAP(kMips64Delta, 0x67e00000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, 15, 0,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | REG_USE_LR |
- NEEDS_FIXUP, "daddiu", "!0r,ra,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64DeltaHi, 0x3c000000,
- kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0 | NEEDS_FIXUP,
- "lui", "!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64DeltaLo, 0x34000000,
- kFmtBlt5_2, 16, 21, kFmtBitBlt, 15, 0, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_DEF0_USE0 | NEEDS_FIXUP,
- "ori", "!0r,!0r,0x!1h(!1d)", 4),
- ENCODING_MAP(kMips64Undefined, 0x64000000,
- kFmtUnused, -1, -1, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
- kFmtUnused, -1, -1, NO_OPERAND,
- "undefined", "", 4),
-};
-
-
-/*
- * Convert a short-form branch to long form. Hopefully, this won't happen
- * very often because the PIC sequence is especially unfortunate.
- *
- * Orig conditional branch
- * -----------------------
- * beq rs,rt,target
- *
- * Long conditional branch
- * -----------------------
- * bne rs,rt,hop
- * bal .+8 ; rRA <- anchor
- * lui rAT, ((target-anchor) >> 16)
- * anchor:
- * ori rAT, rAT, ((target-anchor) & 0xffff)
- * addu rAT, rAT, rRA
- * jalr rZERO, rAT
- * hop:
- *
- * Orig unconditional branch
- * -------------------------
- * b target
- *
- * Long unconditional branch
- * -----------------------
- * bal .+8 ; rRA <- anchor
- * lui rAT, ((target-anchor) >> 16)
- * anchor:
- * ori rAT, rAT, ((target-anchor) & 0xffff)
- * addu rAT, rAT, rRA
- * jalr rZERO, rAT
- *
- *
- * NOTE: An out-of-range bal isn't supported because it should
- * never happen with the current PIC model.
- */
-void Mips64Mir2Lir::ConvertShortToLongBranch(LIR* lir) {
- // For conditional branches we'll need to reverse the sense
- bool unconditional = false;
- int opcode = lir->opcode;
- int dalvik_offset = lir->dalvik_offset;
- switch (opcode) {
- case kMips64Bal:
- LOG(FATAL) << "long branch and link unsupported";
- UNREACHABLE();
- case kMips64B:
- unconditional = true;
- break;
- case kMips64Beq: opcode = kMips64Bne; break;
- case kMips64Bne: opcode = kMips64Beq; break;
- case kMips64Beqz: opcode = kMips64Bnez; break;
- case kMips64Bgez: opcode = kMips64Bltz; break;
- case kMips64Bgtz: opcode = kMips64Blez; break;
- case kMips64Blez: opcode = kMips64Bgtz; break;
- case kMips64Bltz: opcode = kMips64Bgez; break;
- case kMips64Bnez: opcode = kMips64Beqz; break;
- default:
- LOG(FATAL) << "Unexpected branch kind " << opcode;
- UNREACHABLE();
- }
- LIR* hop_target = NULL;
- if (!unconditional) {
- hop_target = RawLIR(dalvik_offset, kPseudoTargetLabel);
- LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
- lir->operands[1], 0, 0, 0, hop_target);
- InsertLIRBefore(lir, hop_branch);
- }
- LIR* curr_pc = RawLIR(dalvik_offset, kMips64CurrPC);
- InsertLIRBefore(lir, curr_pc);
- LIR* anchor = RawLIR(dalvik_offset, kPseudoTargetLabel);
- LIR* delta_hi = RawLIR(dalvik_offset, kMips64DeltaHi, rAT, 0, WrapPointer(anchor), 0, 0,
- lir->target);
- InsertLIRBefore(lir, delta_hi);
- InsertLIRBefore(lir, anchor);
- LIR* delta_lo = RawLIR(dalvik_offset, kMips64DeltaLo, rAT, 0, WrapPointer(anchor), 0, 0,
- lir->target);
- InsertLIRBefore(lir, delta_lo);
- LIR* addu = RawLIR(dalvik_offset, kMips64Addu, rAT, rAT, rRA);
- InsertLIRBefore(lir, addu);
- LIR* jalr = RawLIR(dalvik_offset, kMips64Jalr, rZERO, rAT);
- InsertLIRBefore(lir, jalr);
- if (!unconditional) {
- InsertLIRBefore(lir, hop_target);
- }
- NopLIR(lir);
-}
-
-/*
- * Assemble the LIR into binary instruction format. Note that we may
- * discover that pc-relative displacements may not fit the selected
- * instruction. In those cases we will try to substitute a new code
- * sequence or request that the trace be shortened and retried.
- */
-AssemblerStatus Mips64Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
- LIR *lir;
- AssemblerStatus res = kSuccess; // Assume success.
-
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
- if (lir->opcode < 0) {
- continue;
- }
-
- if (lir->flags.is_nop) {
- continue;
- }
-
- if (lir->flags.fixup != kFixupNone) {
- if (lir->opcode == kMips64Delta) {
- /*
- * The "Delta" pseudo-ops load the difference between
- * two pc-relative locations into the target register
- * found in operands[0]. The delta is determined by
- * (label2 - label1), where label1 is a standard
- * kPseudoTargetLabel and is stored in operands[2].
- * If operands[3] is null, then label2 is a kPseudoTargetLabel
- * and is found in lir->target. If operands[3] is non-NULL,
- * then it is a Switch/Data table.
- */
- int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
- const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
- int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- if ((delta & 0xffff) == delta && ((delta & 0x8000) == 0)) {
- // Fits.
- lir->operands[1] = delta;
- } else {
- // Doesn't fit - must expand to kMips64Delta[Hi|Lo] pair.
- LIR *new_delta_hi = RawLIR(lir->dalvik_offset, kMips64DeltaHi, lir->operands[0], 0,
- lir->operands[2], lir->operands[3], 0, lir->target);
- InsertLIRBefore(lir, new_delta_hi);
- LIR *new_delta_lo = RawLIR(lir->dalvik_offset, kMips64DeltaLo, lir->operands[0], 0,
- lir->operands[2], lir->operands[3], 0, lir->target);
- InsertLIRBefore(lir, new_delta_lo);
- LIR *new_addu = RawLIR(lir->dalvik_offset, kMips64Daddu, lir->operands[0],
- lir->operands[0], rRAd);
- InsertLIRBefore(lir, new_addu);
- NopLIR(lir);
- res = kRetryAll;
- }
- } else if (lir->opcode == kMips64DeltaLo) {
- int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
- const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
- int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- lir->operands[1] = delta & 0xffff;
- } else if (lir->opcode == kMips64DeltaHi) {
- int offset1 = UnwrapPointer<LIR>(lir->operands[2])->offset;
- const EmbeddedData* tab_rec = UnwrapPointer<EmbeddedData>(lir->operands[3]);
- int offset2 = tab_rec ? tab_rec->offset : lir->target->offset;
- int delta = offset2 - offset1;
- lir->operands[1] = (delta >> 16) & 0xffff;
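-        // Illustration: delta = 0x00012344 splits into hi = 0x0001 and lo = 0x2344;
-        // since the pair is recombined as (hi << 16) | lo (presumably via lui/ori),
-        // any 32-bit delta is reconstructed exactly.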
- } else if (lir->opcode == kMips64B || lir->opcode == kMips64Bal) {
- LIR *target_lir = lir->target;
- CodeOffset pc = lir->offset + 4;
- CodeOffset target = target_lir->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- ConvertShortToLongBranch(lir);
- } else {
- lir->operands[0] = delta >> 2;
- }
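-        // A MIPS branch immediate is a signed 16-bit word offset from the delay
-        // slot (lir->offset + 4), i.e. a byte range of [-131072, 131068]; the
-        // bound checked above is one word conservative at the negative end.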
- } else if (lir->opcode >= kMips64Beqz && lir->opcode <= kMips64Bnez) {
- LIR *target_lir = lir->target;
- CodeOffset pc = lir->offset + 4;
- CodeOffset target = target_lir->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- ConvertShortToLongBranch(lir);
- } else {
- lir->operands[1] = delta >> 2;
- }
- } else if (lir->opcode == kMips64Beq || lir->opcode == kMips64Bne) {
- LIR *target_lir = lir->target;
- CodeOffset pc = lir->offset + 4;
- CodeOffset target = target_lir->offset;
- int delta = target - pc;
- if (delta & 0x3) {
- LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
- }
- if (delta > 131068 || delta < -131069) {
- res = kRetryAll;
- ConvertShortToLongBranch(lir);
- } else {
- lir->operands[2] = delta >> 2;
- }
- } else if (lir->opcode == kMips64Jal) {
- CodeOffset cur_pc = (start_addr + lir->offset + 4) & ~3;
- CodeOffset target = lir->operands[0];
- /* ensure PC-region branch can be used */
- DCHECK_EQ((cur_pc & 0xF0000000), (target & 0xF0000000));
- if (target & 0x3) {
- LOG(FATAL) << "Jump target not multiple of 4: " << target;
- }
- lir->operands[0] = target >> 2;
- } else if (lir->opcode == kMips64Lahi) { /* ld address hi (via lui) */
- LIR *target_lir = lir->target;
- CodeOffset target = start_addr + target_lir->offset;
- lir->operands[1] = target >> 16;
- } else if (lir->opcode == kMips64Lalo) { /* ld address lo (via ori) */
- LIR *target_lir = lir->target;
- CodeOffset target = start_addr + target_lir->offset;
- lir->operands[2] = lir->operands[2] + target;
- }
- }
-
- /*
- * If one of the pc-relative instructions expanded we'll have
- * to make another pass. Don't bother to fully assemble the
- * instruction.
- */
- if (res != kSuccess) {
- continue;
- }
- DCHECK(!IsPseudoLirOp(lir->opcode));
- const Mips64EncodingMap *encoder = &EncodingMap[lir->opcode];
- uint32_t bits = encoder->skeleton;
- int i;
- for (i = 0; i < 4; i++) {
- uint32_t operand;
- uint32_t value;
- operand = lir->operands[i];
- switch (encoder->field_loc[i].kind) {
- case kFmtUnused:
- break;
- case kFmtBitBlt:
- if (encoder->field_loc[i].start == 0 && encoder->field_loc[i].end == 31) {
- value = operand;
- } else {
- value = (operand << encoder->field_loc[i].start) &
- ((1 << (encoder->field_loc[i].end + 1)) - 1);
- }
- bits |= value;
- break;
- case kFmtBlt5_2:
- value = (operand & 0x1f);
- bits |= (value << encoder->field_loc[i].start);
- bits |= (value << encoder->field_loc[i].end);
- break;
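-          // kFmtBlt5_2 duplicates one 5-bit value into two fields: e.g. operand 7
-          // places 0b00111 at both the start and the end bit positions.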
- case kFmtDfp: {
- // TODO: do we need to adjust now that we're using 64BitSolo?
- DCHECK(RegStorage::IsDouble(operand)) << ", Operand = 0x" << std::hex << operand;
- value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
- ((1 << (encoder->field_loc[i].end + 1)) - 1);
- bits |= value;
- break;
- }
- case kFmtSfp:
- DCHECK(RegStorage::IsSingle(operand)) << ", Operand = 0x" << std::hex << operand;
- value = (RegStorage::RegNum(operand) << encoder->field_loc[i].start) &
- ((1 << (encoder->field_loc[i].end + 1)) - 1);
- bits |= value;
- break;
- default:
- LOG(FATAL) << "Bad encoder format: " << encoder->field_loc[i].kind;
- }
- }
- // We only support little-endian MIPS64.
- code_buffer_.push_back(bits & 0xff);
- code_buffer_.push_back((bits >> 8) & 0xff);
- code_buffer_.push_back((bits >> 16) & 0xff);
- code_buffer_.push_back((bits >> 24) & 0xff);
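-    // Illustration of the little-endian emission: jr ra (0x03e00008) becomes
-    // the byte sequence 08 00 e0 03.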
- // TUNING: replace with proper delay slot handling.
- if (encoder->size == 8) {
- DCHECK(!IsPseudoLirOp(lir->opcode));
- const Mips64EncodingMap *encoder2 = &EncodingMap[kMips64Nop];
- uint32_t bits2 = encoder2->skeleton;
- code_buffer_.push_back(bits2 & 0xff);
- code_buffer_.push_back((bits2 >> 8) & 0xff);
- code_buffer_.push_back((bits2 >> 16) & 0xff);
- code_buffer_.push_back((bits2 >> 24) & 0xff);
- }
- }
- return res;
-}
-
-size_t Mips64Mir2Lir::GetInsnSize(LIR* lir) {
- DCHECK(!IsPseudoLirOp(lir->opcode));
- return EncodingMap[lir->opcode].size;
-}
-
-// LIR offset assignment.
-// TODO: consolidate w/ Arm assembly mechanism.
-int Mips64Mir2Lir::AssignInsnOffsets() {
- LIR* lir;
- int offset = 0;
-
- for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
- lir->offset = offset;
- if (LIKELY(lir->opcode >= 0)) {
- if (!lir->flags.is_nop) {
- offset += lir->flags.size;
- }
- } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) {
- if (offset & 0x2) {
- offset += 2;
- lir->operands[0] = 1;
- } else {
- lir->operands[0] = 0;
- }
- }
- // Pseudo opcodes don't consume space.
- }
- return offset;
-}
-
-/*
- * Walk the compilation unit and assign offsets to instructions
- * and literals and compute the total size of the compiled unit.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void Mips64Mir2Lir::AssignOffsets() {
- int offset = AssignInsnOffsets();
-
- // Const values have to be word aligned.
- offset = RoundUp(offset, 4);
-
- // Set up offsets for literals.
- data_offset_ = offset;
-
- offset = AssignLiteralOffset(offset);
-
- offset = AssignSwitchTablesOffset(offset);
-
- offset = AssignFillArrayDataOffset(offset);
-
- total_size_ = offset;
-}
-
-/*
- * Go over each instruction in the list and calculate the offset from the top
- * before sending them off to the assembler. If an out-of-range branch distance is
- * seen, rearrange the instructions a bit to correct it.
- * TODO: consolidate w/ Arm assembly mechanism.
- */
-void Mips64Mir2Lir::AssembleLIR() {
- cu_->NewTimingSplit("Assemble");
- AssignOffsets();
- int assembler_retries = 0;
- /*
- * Assemble here. Note that we generate code with optimistic assumptions
- * and if found not to work, we'll have to redo the sequence and retry.
- */
-
- while (true) {
- AssemblerStatus res = AssembleInstructions(0);
- if (res == kSuccess) {
- break;
- } else {
- assembler_retries++;
- if (assembler_retries > MAX_ASSEMBLER_RETRIES) {
- CodegenDump();
- LOG(FATAL) << "Assembler error - too many retries";
- }
- // Redo offsets and try again.
- AssignOffsets();
- code_buffer_.clear();
- }
- }
-
- // Install literals.
- InstallLiteralPools();
-
- // Install switch tables.
- InstallSwitchTables();
-
- // Install fill array data.
- InstallFillArrayData();
-
- // Create the mapping table and native offset to reference map.
- cu_->NewTimingSplit("PcMappingTable");
- CreateMappingTables();
-
- cu_->NewTimingSplit("GcMap");
- CreateNativeGcMap();
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/backend_mips64.h b/compiler/dex/quick/mips64/backend_mips64.h
deleted file mode 100644
index cc30ae06d8..0000000000
--- a/compiler/dex/quick/mips64/backend_mips64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
-#define ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
-
-namespace art {
-
-struct CompilationUnit;
-class Mir2Lir;
-class MIRGraph;
-class ArenaAllocator;
-
-Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena);
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_MIPS64_BACKEND_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/call_mips64.cc b/compiler/dex/quick/mips64/call_mips64.cc
deleted file mode 100644
index 0e587706cc..0000000000
--- a/compiler/dex/quick/mips64/call_mips64.cc
+++ /dev/null
@@ -1,421 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Mips64 ISA */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "gc/accounting/card_table.h"
-#include "mips64_lir.h"
-#include "mirror/art_method.h"
-#include "mirror/object_array-inl.h"
-
-namespace art {
-
-bool Mips64Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
- // TODO
- UNUSED(bb, mir, special);
- return false;
-}
-
-/*
- * The lack of pc-relative loads on Mips64 presents something of a challenge
- * for our PIC switch table strategy. To materialize the current location
- * we'll do a dummy JAL and reference our tables using rRA as the
- * base register. Note that rRA will be used both as the base to
- * locate the switch table data and as the reference base for the switch
- * target offsets stored in the table. We'll use a special pseudo-instruction
- * to represent the jal and trigger the construction of the
- * switch table offsets (which will happen after final assembly and all
- * labels are fixed).
- *
- * The test loop will look something like:
- *
- * ori r_end, rZERO, #table_size ; size in bytes
- * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
- * nop ; opportunistically fill
- * BaseLabel:
- * addiu r_base, rRA, <table> - <BaseLabel> ; table relative to BaseLabel
- *   addu r_end, r_end, r_base ; end of table
- * lw r_val, [rSP, v_reg_off] ; Test Value
- * loop:
- * beq r_base, r_end, done
- * lw r_key, 0(r_base)
- * addu r_base, 8
- * bne r_val, r_key, loop
- * lw r_disp, -4(r_base)
- * addu rRA, r_disp
- * jalr rZERO, rRA
- * done:
- *
- */
-void Mips64Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
- const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later.
- SwitchTable* tab_rec = static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable),
- kArenaAllocData));
- tab_rec->switch_mir = mir;
- tab_rec->table = table;
- tab_rec->vaddr = current_dalvik_offset_;
- int elements = table[1];
- switch_tables_.push_back(tab_rec);
-
- // The table is composed of 8-byte key/disp pairs.
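-  // (Each entry is a 32-bit key followed by a 32-bit displacement; the test
-  // loop below reads the key at 0(r_base), advances by 8, and on a match
-  // loads the displacement from -4(r_base).)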
- int byte_size = elements * 8;
-
- int size_hi = byte_size >> 16;
- int size_lo = byte_size & 0xffff;
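-  // Illustration: 10000 entries give byte_size = 80000 = 0x13880, so
-  // size_hi = 0x1 and size_lo = 0x3880, materialized below with lui + ori
-  // when size_hi is non-zero.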
-
- RegStorage r_end = AllocTempWide();
- if (size_hi) {
- NewLIR2(kMips64Lui, r_end.GetReg(), size_hi);
- }
- // Must prevent code motion for the curr pc pair.
- GenBarrier(); // Scheduling barrier.
- NewLIR0(kMips64CurrPC); // Really a jal to .+8.
- // Now, fill the branch delay slot.
- if (size_hi) {
- NewLIR3(kMips64Ori, r_end.GetReg(), r_end.GetReg(), size_lo);
- } else {
- NewLIR3(kMips64Ori, r_end.GetReg(), rZERO, size_lo);
- }
- GenBarrier(); // Scheduling barrier.
-
- // Construct BaseLabel and set up table base register.
- LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later.
- tab_rec->anchor = base_label;
- RegStorage r_base = AllocTempWide();
- NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
- OpRegRegReg(kOpAdd, r_end, r_end, r_base);
-
- // Grab switch test value.
- rl_src = LoadValue(rl_src, kCoreReg);
-
- // Test loop.
- RegStorage r_key = AllocTemp();
- LIR* loop_label = NewLIR0(kPseudoTargetLabel);
- LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
- Load32Disp(r_base, 0, r_key);
- OpRegImm(kOpAdd, r_base, 8);
- OpCmpBranch(kCondNe, rl_src.reg, r_key, loop_label);
- RegStorage r_disp = AllocTemp();
- Load32Disp(r_base, -4, r_disp);
- OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
- OpReg(kOpBx, TargetReg(kLr, kWide));
-
- // Loop exit.
- LIR* exit_label = NewLIR0(kPseudoTargetLabel);
- exit_branch->target = exit_label;
-}
-
-/*
- * Code pattern will look something like:
- *
- * lw r_val
- * jal BaseLabel ; stores "return address" (BaseLabel) in rRA
- * nop ; opportunistically fill
- * [subiu r_val, bias] ; Remove bias if low_key != 0
- * bound check -> done
- * lw r_disp, [rRA, r_val]
- * addu rRA, r_disp
- * jalr rZERO, rRA
- * done:
- */
-void Mips64Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
- const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
- // Add the table to the list - we'll process it later.
- SwitchTable* tab_rec =
- static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
- tab_rec->switch_mir = mir;
- tab_rec->table = table;
- tab_rec->vaddr = current_dalvik_offset_;
- int size = table[1];
- switch_tables_.push_back(tab_rec);
-
- // Get the switch value.
- rl_src = LoadValue(rl_src, kCoreReg);
-
- // Prepare the bias. If too big, handle 1st stage here.
- int low_key = s4FromSwitchData(&table[2]);
- bool large_bias = false;
- RegStorage r_key;
- if (low_key == 0) {
- r_key = rl_src.reg;
- } else if ((low_key & 0xffff) != low_key) {
- r_key = AllocTemp();
- LoadConstant(r_key, low_key);
- large_bias = true;
- } else {
- r_key = AllocTemp();
- }
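-  // Illustration: with low_key = 100, an incoming value of 105 is biased down
-  // to r_key = 5 in the delay slot below, which then indexes the table of
-  // 4-byte displacements.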
-
- // Must prevent code motion for the curr pc pair.
- GenBarrier();
- NewLIR0(kMips64CurrPC); // Really a jal to .+8.
- // Now, fill the branch delay slot with bias strip.
- if (low_key == 0) {
- NewLIR0(kMips64Nop);
- } else {
- if (large_bias) {
- OpRegRegReg(kOpSub, r_key, rl_src.reg, r_key);
- } else {
- OpRegRegImm(kOpSub, r_key, rl_src.reg, low_key);
- }
- }
- GenBarrier(); // Scheduling barrier.
-
- // Construct BaseLabel and set up table base register.
- LIR* base_label = NewLIR0(kPseudoTargetLabel);
- // Remember base label so offsets can be computed later.
- tab_rec->anchor = base_label;
-
- // Bounds check - if < 0 or >= size continue following switch.
- LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);
-
- // Materialize the table base pointer.
- RegStorage r_base = AllocTempWide();
- NewLIR4(kMips64Delta, r_base.GetReg(), 0, WrapPointer(base_label), WrapPointer(tab_rec));
-
- // Load the displacement from the switch table.
- RegStorage r_disp = AllocTemp();
- LoadBaseIndexed(r_base, r_key, r_disp, 2, k32);
-
-  // Add to rRA and go.
- OpRegRegReg(kOpAdd, TargetReg(kLr, kWide), TargetReg(kLr, kWide), r_disp);
- OpReg(kOpBx, TargetReg(kLr, kWide));
-
- // Branch_over target here.
- LIR* target = NewLIR0(kPseudoTargetLabel);
- branch_over->target = target;
-}
-
-void Mips64Mir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
- RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
- RegStorage reset_reg = AllocTempRef();
- LoadRefDisp(rs_rMIPS64_SELF, ex_offset, rl_result.reg, kNotVolatile);
- LoadConstant(reset_reg, 0);
- StoreRefDisp(rs_rMIPS64_SELF, ex_offset, reset_reg, kNotVolatile);
- FreeTemp(reset_reg);
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
- RegStorage reg_card_base = AllocTempWide();
- RegStorage reg_card_no = AllocTempWide();
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
- OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
- StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base), 0, kUnsignedByte);
- FreeTemp(reg_card_base);
- FreeTemp(reg_card_no);
-}
-
-void Mips64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
- int spill_count = num_core_spills_ + num_fp_spills_;
- /*
- * On entry, rMIPS64_ARG0, rMIPS64_ARG1, rMIPS64_ARG2, rMIPS64_ARG3,
- * rMIPS64_ARG4, rMIPS64_ARG5, rMIPS64_ARG6 & rMIPS64_ARG7 are live.
- * Let the register allocation mechanism know so it doesn't try to
- * use any of them when expanding the frame or flushing.
- */
- LockTemp(rs_rMIPS64_ARG0);
- LockTemp(rs_rMIPS64_ARG1);
- LockTemp(rs_rMIPS64_ARG2);
- LockTemp(rs_rMIPS64_ARG3);
- LockTemp(rs_rMIPS64_ARG4);
- LockTemp(rs_rMIPS64_ARG5);
- LockTemp(rs_rMIPS64_ARG6);
- LockTemp(rs_rMIPS64_ARG7);
-
- /*
- * We can safely skip the stack overflow check if we're
- * a leaf *and* our frame size < fudge factor.
- */
- bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_,
- kMips64);
- NewLIR0(kPseudoMethodEntry);
- RegStorage check_reg = AllocTempWide();
- RegStorage new_sp = AllocTempWide();
- if (!skip_overflow_check) {
- // Load stack limit.
- LoadWordDisp(rs_rMIPS64_SELF, Thread::StackEndOffset<8>().Int32Value(), check_reg);
- }
- // Spill core callee saves.
- SpillCoreRegs();
- // NOTE: promotion of FP regs currently unsupported, thus no FP spill.
- DCHECK_EQ(num_fp_spills_, 0);
- const int frame_sub = frame_size_ - spill_count * 8;
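-  // Illustration (assuming SpillCoreRegs() drops SP by the spill area): with
-  // frame_size_ = 80 and 3 core spills, 24 bytes are consumed by the spills
-  // and frame_sub = 56 is subtracted below to finish the frame.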
- if (!skip_overflow_check) {
- class StackOverflowSlowPath : public LIRSlowPath {
- public:
- StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
- : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
- }
- void Compile() OVERRIDE {
- m2l_->ResetRegPool();
- m2l_->ResetDefTracking();
- GenerateTargetLabel(kPseudoThrowTarget);
- // Load RA from the top of the frame.
- m2l_->LoadWordDisp(rs_rMIPS64_SP, sp_displace_ - 8, rs_rRAd);
- m2l_->OpRegImm(kOpAdd, rs_rMIPS64_SP, sp_displace_);
- m2l_->ClobberCallerSave();
- RegStorage r_tgt = m2l_->CallHelperSetup(kQuickThrowStackOverflow); // Doesn't clobber LR.
- m2l_->CallHelper(r_tgt, kQuickThrowStackOverflow, false /* MarkSafepointPC */,
- false /* UseLink */);
- }
-
- private:
- const size_t sp_displace_;
- };
- OpRegRegImm(kOpSub, new_sp, rs_rMIPS64_SP, frame_sub);
- LIR* branch = OpCmpBranch(kCondUlt, new_sp, check_reg, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_count * 8));
- // TODO: avoid copy for small frame sizes.
- OpRegCopy(rs_rMIPS64_SP, new_sp); // Establish stack.
- } else {
- OpRegImm(kOpSub, rs_rMIPS64_SP, frame_sub);
- }
-
- FlushIns(ArgLocs, rl_method);
-
- FreeTemp(rs_rMIPS64_ARG0);
- FreeTemp(rs_rMIPS64_ARG1);
- FreeTemp(rs_rMIPS64_ARG2);
- FreeTemp(rs_rMIPS64_ARG3);
- FreeTemp(rs_rMIPS64_ARG4);
- FreeTemp(rs_rMIPS64_ARG5);
- FreeTemp(rs_rMIPS64_ARG6);
- FreeTemp(rs_rMIPS64_ARG7);
-}
-
-void Mips64Mir2Lir::GenExitSequence() {
- /*
- * In the exit path, rMIPS64_RET0/rMIPS64_RET1 are live - make sure they aren't
- * allocated by the register utilities as temps.
- */
- LockTemp(rs_rMIPS64_RET0);
- LockTemp(rs_rMIPS64_RET1);
-
- NewLIR0(kPseudoMethodExit);
- UnSpillCoreRegs();
- OpReg(kOpBx, rs_rRAd);
-}
-
-void Mips64Mir2Lir::GenSpecialExitSequence() {
- OpReg(kOpBx, rs_rRAd);
-}
-
-void Mips64Mir2Lir::GenSpecialEntryForSuspend() {
- // Keep 16-byte stack alignment - push A0, i.e. ArtMethod* and RA.
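-  // Resulting 16-byte frame: RA saved at SP + 8, ArtMethod* (A0) at SP + 0.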
- core_spill_mask_ = (1u << rs_rRAd.GetRegNum());
- num_core_spills_ = 1u;
- fp_spill_mask_ = 0u;
- num_fp_spills_ = 0u;
- frame_size_ = 16u;
- core_vmap_table_.clear();
- fp_vmap_table_.clear();
- OpRegImm(kOpSub, rs_rMIPS64_SP, frame_size_);
- StoreWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
- StoreWordDisp(rs_rMIPS64_SP, 0, rs_rA0d);
-}
-
-void Mips64Mir2Lir::GenSpecialExitForSuspend() {
- // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
- LoadWordDisp(rs_rMIPS64_SP, frame_size_ - 8, rs_rRAd);
- OpRegImm(kOpAdd, rs_rMIPS64_SP, frame_size_);
-}
-
-/*
- * Bit of a hack here - in the absence of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
- */
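-// The invoke lowering is expected to call this repeatedly with state = 0, 1,
-// 2, ... (each call emits one step and returns state + 1), stopping once -1
-// is returned.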
-static int Mips64NextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED, int state,
- const MethodReference& target_method, uint32_t,
- uintptr_t direct_code, uintptr_t direct_method, InvokeType type) {
- Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
- if (direct_code != 0 && direct_method != 0) {
- switch (state) {
- case 0: // Get the current Method* [sets kArg0]
- if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
- } else {
- cg->LoadCodeAddress(target_method, type, kInvokeTgt);
- }
- if (direct_method != static_cast<uintptr_t>(-1)) {
- cg->LoadConstantWide(cg->TargetReg(kArg0, kRef), direct_method);
- } else {
- cg->LoadMethodAddress(target_method, type, kArg0);
- }
- break;
- default:
- return -1;
- }
- } else {
- RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
- switch (state) {
- case 0: // Get the current Method* [sets kArg0]
- // TUNING: we can save a reg copy if Method* has been promoted.
- cg->LoadCurrMethodDirect(arg0_ref);
- break;
- case 1: // Get method->dex_cache_resolved_methods_
- cg->LoadRefDisp(arg0_ref, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
- arg0_ref, kNotVolatile);
- // Set up direct code if known.
- if (direct_code != 0) {
- if (direct_code != static_cast<uintptr_t>(-1)) {
- cg->LoadConstantWide(cg->TargetPtrReg(kInvokeTgt), direct_code);
- } else {
- CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
- cg->LoadCodeAddress(target_method, type, kInvokeTgt);
- }
- }
- break;
- case 2: // Grab target method*
- CHECK_EQ(cu->dex_file, target_method.dex_file);
- cg->LoadRefDisp(arg0_ref, mirror::ObjectArray<mirror::Object>::
- OffsetOfElement(target_method.dex_method_index).Int32Value(), arg0_ref,
- kNotVolatile);
- break;
- case 3: // Grab the code from the method*
- if (direct_code == 0) {
- int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
- InstructionSetPointerSize(cu->instruction_set)).Int32Value();
- // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
- cg->LoadWordDisp(arg0_ref, offset, cg->TargetPtrReg(kInvokeTgt));
- }
- break;
- default:
- return -1;
- }
- }
- return state + 1;
-}
-
-NextCallInsn Mips64Mir2Lir::GetNextSDCallInsn() {
- return Mips64NextSDCallInsn;
-}
-
-LIR* Mips64Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info ATTRIBUTE_UNUSED) {
- return OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/codegen_mips64.h b/compiler/dex/quick/mips64/codegen_mips64.h
deleted file mode 100644
index c9fd62f757..0000000000
--- a/compiler/dex/quick/mips64/codegen_mips64.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
-#define ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
-
-#include "dex/quick/mir_to_lir.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-struct CompilationUnit;
-
-class Mips64Mir2Lir FINAL : public Mir2Lir {
- protected:
- class InToRegStorageMips64Mapper : public InToRegStorageMapper {
- public:
- explicit InToRegStorageMips64Mapper(Mir2Lir* m2l) : m2l_(m2l), cur_arg_reg_(0) {}
- virtual RegStorage GetNextReg(ShortyArg arg);
- virtual void Reset() OVERRIDE {
- cur_arg_reg_ = 0;
- }
- protected:
- Mir2Lir* m2l_;
- private:
- size_t cur_arg_reg_;
- };
-
- InToRegStorageMips64Mapper in_to_reg_storage_mips64_mapper_;
- InToRegStorageMapper* GetResetedInToRegStorageMapper() OVERRIDE {
- in_to_reg_storage_mips64_mapper_.Reset();
- return &in_to_reg_storage_mips64_mapper_;
- }
-
- public:
- Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
-
- // Required for target - codegen utilities.
- bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
- RegLocation rl_dest, int lit);
- bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
- void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1, int32_t constant)
- OVERRIDE;
- void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1, int64_t constant)
- OVERRIDE;
- LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
- LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
- VolatileKind is_volatile) OVERRIDE;
- LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
- OpSize size) OVERRIDE;
- LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
- LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
- LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
- VolatileKind is_volatile) OVERRIDE;
- LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
- OpSize size) OVERRIDE;
- LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
- LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
-
- /// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
- void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
-
- // Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
- RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
- if (wide_kind == kWide || wide_kind == kRef) {
- return As64BitReg(TargetReg(reg));
- } else {
- return Check32BitReg(TargetReg(reg));
- }
- }
- RegStorage TargetPtrReg(SpecialTargetRegister reg) OVERRIDE {
- return As64BitReg(TargetReg(reg));
- }
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
- ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
- void AdjustSpillMask();
- void ClobberCallerSave();
- void FreeCallTemps();
- void LockCallTemps();
- void CompilerInitializeRegAlloc();
-
- // Required for target - miscellaneous.
- void AssembleLIR();
- int AssignInsnOffsets();
- void AssignOffsets();
- AssemblerStatus AssembleInstructions(CodeOffset start_addr);
- void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
- void SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
- ResourceMask* def_mask) OVERRIDE;
- const char* GetTargetInstFmt(int opcode);
- const char* GetTargetInstName(int opcode);
- std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
- ResourceMask GetPCUseDefEncoding() const OVERRIDE;
- uint64_t GetTargetInstFlags(int opcode);
- size_t GetInsnSize(LIR* lir) OVERRIDE;
- bool IsUnconditionalBranch(LIR* lir);
-
- // Get the register class for load/store of a field.
- RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) OVERRIDE;
-
- // Required for target - Dalvik-level generators.
- void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation lr_shift);
- void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, int flags);
- void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_dest, int scale);
- void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, RegLocation rl_index,
- RegLocation rl_src, int scale, bool card_mark);
- void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_shift, int flags);
- void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2);
- void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
- bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
- bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
- bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object);
- bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long);
- bool GenInlinedSqrt(CallInfo* info);
- bool GenInlinedPeek(CallInfo* info, OpSize size);
- bool GenInlinedPoke(CallInfo* info, OpSize size);
- void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
- void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, int flags) OVERRIDE;
- RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi, bool is_div);
- RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div);
- void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivZeroCheckWide(RegStorage reg);
- void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
- void GenExitSequence();
- void GenSpecialExitSequence() OVERRIDE;
- void GenSpecialEntryForSuspend() OVERRIDE;
- void GenSpecialExitForSuspend() OVERRIDE;
- void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
- void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
- void GenSelect(BasicBlock* bb, MIR* mir);
- void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
- int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) OVERRIDE;
- bool GenMemBarrier(MemBarrierKind barrier_kind);
- void GenMoveException(RegLocation rl_dest);
- void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit);
- void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
- void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
- void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
- bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
-
- // Required for target - single operation generators.
- LIR* OpUnconditionalBranch(LIR* target);
- LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target);
- LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target);
- LIR* OpCondBranch(ConditionCode cc, LIR* target);
- LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target);
- LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpIT(ConditionCode cond, const char* guide);
- void OpEndIT(LIR* it);
- LIR* OpMem(OpKind op, RegStorage r_base, int disp);
- void OpPcRelLoad(RegStorage reg, LIR* target);
- LIR* OpReg(OpKind op, RegStorage r_dest_src);
- void OpRegCopy(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
- LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value);
- LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2);
- LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type);
- LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
- LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
- LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
- LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
- LIR* OpTestSuspend(LIR* target);
- LIR* OpVldm(RegStorage r_base, int count);
- LIR* OpVstm(RegStorage r_base, int count);
- void OpRegCopyWide(RegStorage dest, RegStorage src);
-
- // TODO: collapse r_dest.
- LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
- // TODO: collapse r_src.
- LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
- void SpillCoreRegs();
- void UnSpillCoreRegs();
- static const Mips64EncodingMap EncodingMap[kMips64Last];
- bool InexpensiveConstantInt(int32_t value);
- bool InexpensiveConstantFloat(int32_t value);
- bool InexpensiveConstantLong(int64_t value);
- bool InexpensiveConstantDouble(int64_t value);
-
- bool WideGPRsAreAliases() const OVERRIDE {
- return true; // 64b architecture.
- }
- bool WideFPRsAreAliases() const OVERRIDE {
- return true; // 64b architecture.
- }
-
- LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
- RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, int flags) OVERRIDE;
- RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div)
- OVERRIDE;
- NextCallInsn GetNextSDCallInsn() OVERRIDE;
- LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) OVERRIDE;
- // Unimplemented intrinsics.
- bool GenInlinedCharAt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedAbsInt(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedAbsLong(CallInfo* info ATTRIBUTE_UNUSED) OVERRIDE {
- return false;
- }
- bool GenInlinedIndexOf(CallInfo* info ATTRIBUTE_UNUSED, bool zero_based ATTRIBUTE_UNUSED)
- OVERRIDE {
- return false;
- }
-
- private:
- void GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenNotLong(RegLocation rl_dest, RegLocation rl_src);
- void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
- void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
- void GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags);
- void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
- RegisterClass reg_class);
-
- void ConvertShortToLongBranch(LIR* lir);
-
- /**
- * @param reg #RegStorage containing a Solo64 input register (e.g. @c a1 or @c d0).
- * @return A Solo32 with the same register number as the @p reg (e.g. @c a1 or @c f0).
- * @see As64BitReg
- */
- RegStorage As32BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 64b register";
- } else {
- LOG(WARNING) << "Expected 64b register";
- return reg;
- }
- }
- RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
- }
-
- RegStorage Check32BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 32b register";
- } else {
- LOG(WARNING) << "Checked for 32b register";
- return As32BitReg(reg);
- }
- }
- return reg;
- }
-
- /**
- * @param reg #RegStorage containing a Solo32 input register (e.g. @c a1 or @c f0).
- * @return A Solo64 with the same register number as the @p reg (e.g. @c a1 or @c d0).
- */
- RegStorage As64BitReg(RegStorage reg) {
- DCHECK(!reg.IsPair());
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Expected 32b register";
- } else {
- LOG(WARNING) << "Expected 32b register";
- return reg;
- }
- }
- RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
- reg.GetRawBits() & RegStorage::kRegTypeMask);
- DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
- ->GetReg().GetReg(),
- ret_val.GetReg());
- return ret_val;
- }
-
- RegStorage Check64BitReg(RegStorage reg) {
- if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
- if (kFailOnSizeError) {
- LOG(FATAL) << "Checked for 64b register";
- } else {
- LOG(WARNING) << "Checked for 64b register";
- return As64BitReg(reg);
- }
- }
- return reg;
- }
-
- void GenBreakpoint(int code);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_MIPS64_CODEGEN_MIPS64_H_
diff --git a/compiler/dex/quick/mips64/fp_mips64.cc b/compiler/dex/quick/mips64/fp_mips64.cc
deleted file mode 100644
index 5c8ee9ccb8..0000000000
--- a/compiler/dex/quick/mips64/fp_mips64.cc
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-void Mips64Mir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- int op = kMips64Nop;
- RegLocation rl_result;
-
- /*
- * Don't attempt to optimize register usage since these opcodes call out to
- * the handlers.
- */
- switch (opcode) {
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::ADD_FLOAT:
- op = kMips64Fadds;
- break;
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::SUB_FLOAT:
- op = kMips64Fsubs;
- break;
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT:
- op = kMips64Fdivs;
- break;
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::MUL_FLOAT:
- op = kMips64Fmuls;
- break;
- case Instruction::REM_FLOAT_2ADDR:
- case Instruction::REM_FLOAT:
- FlushAllRegs(); // Send everything to home location.
- CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
- rl_result = GetReturn(kFPReg);
- StoreValue(rl_dest, rl_result);
- return;
- case Instruction::NEG_FLOAT:
- GenNegFloat(rl_dest, rl_src1);
- return;
- default:
- LOG(FATAL) << "Unexpected opcode: " << opcode;
- }
- rl_src1 = LoadValue(rl_src1, kFPReg);
- rl_src2 = LoadValue(rl_src2, kFPReg);
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
- int op = kMips64Nop;
- RegLocation rl_result;
-
- switch (opcode) {
- case Instruction::ADD_DOUBLE_2ADDR:
- case Instruction::ADD_DOUBLE:
- op = kMips64Faddd;
- break;
- case Instruction::SUB_DOUBLE_2ADDR:
- case Instruction::SUB_DOUBLE:
- op = kMips64Fsubd;
- break;
- case Instruction::DIV_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE:
- op = kMips64Fdivd;
- break;
- case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::MUL_DOUBLE:
- op = kMips64Fmuld;
- break;
- case Instruction::REM_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE:
- FlushAllRegs(); // Send everything to home location.
- CallRuntimeHelperRegLocationRegLocation(kQuickFmod, rl_src1, rl_src2, false);
- rl_result = GetReturnWide(kFPReg);
- StoreValueWide(rl_dest, rl_result);
- return;
- case Instruction::NEG_DOUBLE:
- GenNegDouble(rl_dest, rl_src1);
- return;
- default:
- LOG(FATAL) << "Unpexpected opcode: " << opcode;
- }
- rl_src1 = LoadValueWide(rl_src1, kFPReg);
- DCHECK(rl_src1.wide);
- rl_src2 = LoadValueWide(rl_src2, kFPReg);
- DCHECK(rl_src2.wide);
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- DCHECK(rl_dest.wide);
- DCHECK(rl_result.wide);
- NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
- // TODO: need mips64 implementation.
- UNUSED(rl_dest, rl_src1, constant);
- LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips64";
-}
-
-void Mips64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
- // TODO: need mips64 implementation.
- UNUSED(rl_dest, rl_src1, constant);
- LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips64";
-}
-
-void Mips64Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src) {
- int op = kMips64Nop;
- RegLocation rl_result;
- switch (opcode) {
- case Instruction::INT_TO_FLOAT:
- op = kMips64Fcvtsw;
- break;
- case Instruction::DOUBLE_TO_FLOAT:
- op = kMips64Fcvtsd;
- break;
- case Instruction::FLOAT_TO_DOUBLE:
- op = kMips64Fcvtds;
- break;
- case Instruction::INT_TO_DOUBLE:
- op = kMips64Fcvtdw;
- break;
- case Instruction::FLOAT_TO_INT:
- GenConversionCall(kQuickF2iz, rl_dest, rl_src, kCoreReg);
- return;
- case Instruction::DOUBLE_TO_INT:
- GenConversionCall(kQuickD2iz, rl_dest, rl_src, kCoreReg);
- return;
- case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(kQuickL2d, rl_dest, rl_src, kFPReg);
- return;
- case Instruction::FLOAT_TO_LONG:
- GenConversionCall(kQuickF2l, rl_dest, rl_src, kCoreReg);
- return;
- case Instruction::LONG_TO_FLOAT:
- GenConversionCall(kQuickL2f, rl_dest, rl_src, kFPReg);
- return;
- case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(kQuickD2l, rl_dest, rl_src, kCoreReg);
- return;
- default:
- LOG(FATAL) << "Unexpected opcode: " << opcode;
- }
- if (rl_src.wide) {
- rl_src = LoadValueWide(rl_src, kFPReg);
- } else {
- rl_src = LoadValue(rl_src, kFPReg);
- }
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- if (rl_dest.wide) {
- StoreValueWide(rl_dest, rl_result);
- } else {
- StoreValue(rl_dest, rl_result);
- }
-}
-
-void Mips64Mir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- bool wide = true;
- QuickEntrypointEnum target;
-
- switch (opcode) {
- case Instruction::CMPL_FLOAT:
- target = kQuickCmplFloat;
- wide = false;
- break;
- case Instruction::CMPG_FLOAT:
- target = kQuickCmpgFloat;
- wide = false;
- break;
- case Instruction::CMPL_DOUBLE:
- target = kQuickCmplDouble;
- break;
- case Instruction::CMPG_DOUBLE:
- target = kQuickCmpgDouble;
- break;
- default:
- LOG(FATAL) << "Unexpected opcode: " << opcode;
- target = kQuickCmplFloat;
- }
- FlushAllRegs();
- LockCallTemps();
- if (wide) {
- RegStorage r_tmp1(RegStorage::k64BitSolo, rMIPS64_FARG0);
- RegStorage r_tmp2(RegStorage::k64BitSolo, rMIPS64_FARG1);
- LoadValueDirectWideFixed(rl_src1, r_tmp1);
- LoadValueDirectWideFixed(rl_src2, r_tmp2);
- } else {
- LoadValueDirectFixed(rl_src1, rs_rMIPS64_FARG0);
- LoadValueDirectFixed(rl_src2, rs_rMIPS64_FARG1);
- }
- RegStorage r_tgt = LoadHelper(target);
- // NOTE: not a safepoint.
- OpReg(kOpBlx, r_tgt);
- RegLocation rl_result = GetReturn(kCoreReg);
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
- UNUSED(bb, mir, gt_bias, is_double);
- UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
-}
-
-void Mips64Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
- RegLocation rl_result;
- rl_src = LoadValue(rl_src, kFPReg);
- rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kMips64Fnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- StoreValue(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
- RegLocation rl_result;
- rl_src = LoadValueWide(rl_src, kFPReg);
- rl_result = EvalLocWide(rl_dest, kFPReg, true);
- NewLIR2(kMips64Fnegd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
-
-bool Mips64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
- // TODO: need Mips64 implementation.
- UNUSED(info, is_min, is_long);
- return false;
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/int_mips64.cc b/compiler/dex/quick/mips64/int_mips64.cc
deleted file mode 100644
index 5c545bb824..0000000000
--- a/compiler/dex/quick/mips64/int_mips64.cc
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the Mips64 ISA */
-
-#include "codegen_mips64.h"
-
-#include "base/logging.h"
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "mips64_lir.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-
-/*
- * Compare two 64-bit values
- * x = y return 0
- * x < y return -1
- * x > y return 1
- *
- * slt temp, x, y; # (x < y) ? 1:0
- * slt res, y, x; # (x > y) ? 1:0
- * subu res, res, temp; # res = -1:1:0 for [ < > = ]
- *
- */
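-// Worked example: x = 5, y = 9 gives temp = 1 (x < y), res = 0 (x > y is
-// false), and subu yields res = -1.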
-void Mips64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegStorage temp = AllocTempWide();
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(kMips64Slt, temp.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- NewLIR3(kMips64Slt, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
- NewLIR3(kMips64Subu, rl_result.reg.GetReg(), rl_result.reg.GetReg(), temp.GetReg());
- FreeTemp(temp);
- StoreValue(rl_dest, rl_result);
-}
-
-LIR* Mips64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
- LIR* branch;
- Mips64OpCode slt_op;
- Mips64OpCode br_op;
- bool cmp_zero = false;
- bool swapped = false;
- switch (cond) {
- case kCondEq:
- br_op = kMips64Beq;
- cmp_zero = true;
- break;
- case kCondNe:
- br_op = kMips64Bne;
- cmp_zero = true;
- break;
- case kCondUlt:
- slt_op = kMips64Sltu;
- br_op = kMips64Bnez;
- break;
- case kCondUge:
- slt_op = kMips64Sltu;
- br_op = kMips64Beqz;
- break;
- case kCondGe:
- slt_op = kMips64Slt;
- br_op = kMips64Beqz;
- break;
- case kCondGt:
- slt_op = kMips64Slt;
- br_op = kMips64Bnez;
- swapped = true;
- break;
- case kCondLe:
- slt_op = kMips64Slt;
- br_op = kMips64Beqz;
- swapped = true;
- break;
- case kCondLt:
- slt_op = kMips64Slt;
- br_op = kMips64Bnez;
- break;
- case kCondHi: // Gtu
- slt_op = kMips64Sltu;
- br_op = kMips64Bnez;
- swapped = true;
- break;
- default:
- LOG(FATAL) << "No support for ConditionCode: " << cond;
- return NULL;
- }
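-  // Illustration: kCondGt (swapped) computes slt t, src2, src1, i.e.
-  // t = (src1 > src2), and then branches with bnez t.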
- if (cmp_zero) {
- branch = NewLIR2(br_op, src1.GetReg(), src2.GetReg());
- } else {
- RegStorage t_reg = AllocTemp();
- if (swapped) {
- NewLIR3(slt_op, t_reg.GetReg(), src2.GetReg(), src1.GetReg());
- } else {
- NewLIR3(slt_op, t_reg.GetReg(), src1.GetReg(), src2.GetReg());
- }
- branch = NewLIR1(br_op, t_reg.GetReg());
- FreeTemp(t_reg);
- }
- branch->target = target;
- return branch;
-}
-
-LIR* Mips64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
- int check_value, LIR* target) {
- LIR* branch;
- if (check_value != 0) {
- // TUNING: handle s16 & kCondLt/Mi case using slti.
- RegStorage t_reg = AllocTemp();
- LoadConstant(t_reg, check_value);
- branch = OpCmpBranch(cond, reg, t_reg, target);
- FreeTemp(t_reg);
- return branch;
- }
- Mips64OpCode opc;
- switch (cond) {
- case kCondEq: opc = kMips64Beqz; break;
- case kCondGe: opc = kMips64Bgez; break;
- case kCondGt: opc = kMips64Bgtz; break;
- case kCondLe: opc = kMips64Blez; break;
-    // case kCondMi:
- case kCondLt: opc = kMips64Bltz; break;
- case kCondNe: opc = kMips64Bnez; break;
- default:
- // Tuning: use slti when applicable.
- RegStorage t_reg = AllocTemp();
- LoadConstant(t_reg, check_value);
- branch = OpCmpBranch(cond, reg, t_reg, target);
- FreeTemp(t_reg);
- return branch;
- }
- branch = NewLIR1(opc, reg.GetReg());
- branch->target = target;
- return branch;
-}
-
-LIR* Mips64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
- DCHECK(!r_dest.IsPair() && !r_src.IsPair());
- if (r_dest.IsFloat() || r_src.IsFloat())
- return OpFpRegCopy(r_dest, r_src);
-  // TODO: Check that r_src and r_dest are either both 32-bit or both 64-bit.
- LIR* res;
- if (r_dest.Is64Bit() || r_src.Is64Bit()) {
- res = RawLIR(current_dalvik_offset_, kMips64Move, r_dest.GetReg(), r_src.GetReg());
- } else {
- res = RawLIR(current_dalvik_offset_, kMips64Sll, r_dest.GetReg(), r_src.GetReg(), 0);
- }
- if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
- res->flags.is_nop = true;
- }
- return res;
-}
-
-void Mips64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
- if (r_dest != r_src) {
- LIR *res = OpRegCopyNoInsert(r_dest, r_src);
- AppendLIR(res);
- }
-}
-
-void Mips64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
- OpRegCopy(r_dest, r_src);
-}
-
-void Mips64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
- int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
- // Implement as a branch-over.
- // TODO: Conditional move?
- LoadConstant(rs_dest, true_val);
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
- LoadConstant(rs_dest, false_val);
- LIR* target_label = NewLIR0(kPseudoTargetLabel);
- ne_branchover->target = target_label;
-}
-
-void Mips64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
- UNIMPLEMENTED(FATAL) << "Need codegen for select";
-}
-
-void Mips64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
- UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
-}
-
-RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
- bool is_div) {
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), reg2.GetReg());
- return rl_result;
-}
-
-RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit,
- bool is_div) {
- RegStorage t_reg = AllocTemp();
- NewLIR3(kMips64Addiu, t_reg.GetReg(), rZERO, lit);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(is_div ? kMips64Div : kMips64Mod, rl_result.reg.GetReg(), reg1.GetReg(), t_reg.GetReg());
- FreeTemp(t_reg);
- return rl_result;
-}
-
-RegLocation Mips64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
- LOG(FATAL) << "Unexpected use of GenDivRem for Mips64";
- UNREACHABLE();
-}
-
-RegLocation Mips64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
- LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips64";
- UNREACHABLE();
-}
-
-bool Mips64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- UNUSED(info, is_long, is_object);
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- UNUSED(info);
- // TODO: add Mips64 implementation.
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- UNUSED(info);
- // TODO: add Mips64 implementation.
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
- UNUSED(info);
- return false;
-}
-
-bool Mips64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
- if (size != kSignedByte) {
- // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
- return false;
- }
- RegLocation rl_src_address = info->args[0]; // Long address.
- RegLocation rl_dest = InlineTarget(info);
- RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
- RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
- StoreValue(rl_dest, rl_result);
- return true;
-}
-
-bool Mips64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
- if (size != kSignedByte) {
- // MIPS64 supports only aligned access. Defer unaligned access to JNI implementation.
- return false;
- }
- RegLocation rl_src_address = info->args[0]; // Long address.
- RegLocation rl_src_value = info->args[2]; // [size] value.
- RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
- DCHECK(size == kSignedByte);
- RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
- return true;
-}
-
-void Mips64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
- UNUSED(reg, target);
- LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
- LOG(FATAL) << "Unexpected use of OpVldm for Mips64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
- LOG(FATAL) << "Unexpected use of OpVstm for Mips64";
- UNREACHABLE();
-}
-
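-// For a literal with exactly two bits set, e.g. lit = 10 (first_bit = 1,
-// second_bit = 3), the routine below computes src * 10 as
-// ((src << 2) + src) << 1: shift by (second_bit - first_bit), add the
-// original, then shift by first_bit.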
-void Mips64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
- int lit, int first_bit, int second_bit) {
- UNUSED(lit);
- RegStorage t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
- FreeTemp(t_reg);
- if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
- }
-}
-
-void Mips64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
- GenDivZeroCheck(reg);
-}
-
-// Test suspend flag, return target of taken suspend branch.
-LIR* Mips64Mir2Lir::OpTestSuspend(LIR* target) {
- OpRegImm(kOpSub, rs_rMIPS64_SUSPEND, 1);
- return OpCmpImmBranch((target == NULL) ? kCondEq : kCondNe, rs_rMIPS64_SUSPEND, 0, target);
-}
-
-// Decrement register and branch on condition.
-LIR* Mips64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
- OpRegImm(kOpSub, reg, 1);
- return OpCmpImmBranch(c_code, reg, 0, target);
-}
-
-bool Mips64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips64";
- UNREACHABLE();
-}
-
-bool Mips64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of easyMultiply in Mips64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
- LOG(FATAL) << "Unexpected use of OpIT in Mips64";
- UNREACHABLE();
-}
-
-void Mips64Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
- LOG(FATAL) << "Unexpected use of OpEndIT in Mips64";
-}
-
-void Mips64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, int flags) {
- switch (opcode) {
- case Instruction::NOT_LONG:
- GenNotLong(rl_dest, rl_src2);
- return;
- case Instruction::ADD_LONG:
- case Instruction::ADD_LONG_2ADDR:
- GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::SUB_LONG:
- case Instruction::SUB_LONG_2ADDR:
- GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::MUL_LONG:
- case Instruction::MUL_LONG_2ADDR:
- GenMulLong(rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::DIV_LONG:
- case Instruction::DIV_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
- return;
- case Instruction::REM_LONG:
- case Instruction::REM_LONG_2ADDR:
- GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
- return;
- case Instruction::AND_LONG:
- case Instruction::AND_LONG_2ADDR:
- GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::OR_LONG:
- case Instruction::OR_LONG_2ADDR:
- GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::XOR_LONG:
- case Instruction::XOR_LONG_2ADDR:
- GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
- return;
- case Instruction::NEG_LONG:
- GenNegLong(rl_dest, rl_src2);
- return;
-
- default:
- LOG(FATAL) << "Invalid long arith op";
- return;
- }
-}
-
-void Mips64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg, rl_src1.reg, rl_src2.reg);
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
- rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpMvn, rl_result.reg, rl_src.reg);
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
- rl_src = LoadValueWide(rl_src, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- NewLIR3(kMips64Dmul, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, bool is_div,
- int flags) {
- UNUSED(opcode);
- // TODO: Implement easy div/rem?
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
- GenDivZeroCheckWide(rl_src2.reg);
- }
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- NewLIR3(is_div ? kMips64Ddiv : kMips64Dmod, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
- rl_src2.reg.GetReg());
- StoreValueWide(rl_dest, rl_result);
-}
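
GenDivRemLong maps quotient and remainder onto the single-instruction MIPS64R6
forms, kMips64Ddiv and kMips64Dmod, and emits the divide-by-zero check only
when MIR_IGNORE_DIV_ZERO_CHECK is not set. A sketch of the resulting
semantics, with a thrown exception standing in for the runtime's
ArithmeticException path:

    #include <cassert>
    #include <cstdint>
    #include <stdexcept>

    // Optional zero check, then ddiv (quotient) or dmod (remainder).
    int64_t DivRemLong(int64_t lhs, int64_t rhs, bool is_div, bool check_zero) {
      if (check_zero && rhs == 0) {
        throw std::runtime_error("divide by zero");  // GenDivZeroCheckWide path
      }
      return is_div ? lhs / rhs : lhs % rhs;         // kMips64Ddiv : kMips64Dmod
    }

    int main() {
      assert(DivRemLong(17, 5, /*is_div=*/ true, true) == 3);
      assert(DivRemLong(17, 5, /*is_div=*/ false, true) == 2);
      return 0;
    }
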
-
-/*
- * Generate array load
- */
-void Mips64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_dest, int scale) {
- RegisterClass reg_class = RegClassBySize(size);
- int len_offset = mirror::Array::LengthOffset().Int32Value();
- int data_offset;
- RegLocation rl_result;
- rl_array = LoadValue(rl_array, kRefReg);
- rl_index = LoadValue(rl_index, kCoreReg);
-
- // FIXME: need to add support for rl_index.is_const.
-
- if (size == k64 || size == kDouble) {
- data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
-
- // Null object?
- GenNullCheck(rl_array.reg, opt_flags);
-
- RegStorage reg_ptr = AllocTempRef();
- bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- RegStorage reg_len;
- if (needs_range_check) {
- reg_len = AllocTemp();
- // Get len.
- Load32Disp(rl_array.reg, len_offset, reg_len);
- }
- // reg_ptr -> array data.
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
- FreeTemp(rl_array.reg);
- if ((size == k64) || (size == kDouble)) {
- if (scale) {
- RegStorage r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
- OpRegReg(kOpAdd, reg_ptr, r_new_index);
- FreeTemp(r_new_index);
- } else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
- }
- FreeTemp(rl_index.reg);
- rl_result = EvalLoc(rl_dest, reg_class, true);
-
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
- LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, kNotVolatile);
-
- FreeTemp(reg_ptr);
- StoreValueWide(rl_dest, rl_result);
- } else {
- rl_result = EvalLoc(rl_dest, reg_class, true);
-
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
- if (rl_result.ref) {
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), As32BitReg(rl_result.reg), scale,
- kReference);
- } else {
- LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
- }
-
- FreeTemp(reg_ptr);
- StoreValue(rl_dest, rl_result);
- }
-}
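
The address arithmetic above is the usual base + header + (index << scale)
form: reg_ptr is first advanced past the array header (data_offset), then the
scaled index is added, explicitly for wide elements or folded into the indexed
load otherwise. A standalone sketch, assuming a hypothetical 16-byte header:

    #include <cassert>
    #include <cstdint>

    // Effective address of element `index`: header offset, then scaled index.
    uintptr_t ArrayElementAddress(uintptr_t array_base, int32_t data_offset,
                                  uint32_t index, int scale) {
      uintptr_t reg_ptr = array_base + data_offset;               // OpRegRegImm(kOpAdd, ...)
      return reg_ptr + (static_cast<uintptr_t>(index) << scale);  // indexed part
    }

    int main() {
      // Element 2 of a long[] (scale 3) behind an assumed 16-byte header.
      assert(ArrayElementAddress(0x1000, 16, 2, 3) == 0x1000 + 16 + 16);
      return 0;
    }
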
-
-/*
- * Generate array store
- *
- */
-void Mips64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
- RegLocation rl_index, RegLocation rl_src, int scale,
- bool card_mark) {
- RegisterClass reg_class = RegClassBySize(size);
- int len_offset = mirror::Array::LengthOffset().Int32Value();
- int data_offset;
-
- if (size == k64 || size == kDouble) {
- data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
- } else {
- data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
- }
-
- rl_array = LoadValue(rl_array, kRefReg);
- rl_index = LoadValue(rl_index, kCoreReg);
-
- // FIXME: need to add support for rl_index.is_const.
-
- RegStorage reg_ptr;
- bool allocated_reg_ptr_temp = false;
- if (IsTemp(rl_array.reg) && !card_mark) {
- Clobber(rl_array.reg);
- reg_ptr = rl_array.reg;
- } else {
- reg_ptr = AllocTemp();
- OpRegCopy(reg_ptr, rl_array.reg);
- allocated_reg_ptr_temp = true;
- }
-
- // Null object?
- GenNullCheck(rl_array.reg, opt_flags);
-
- bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
- RegStorage reg_len;
- if (needs_range_check) {
- reg_len = AllocTemp();
- // NOTE: max live temps (4) here.
- // Get len.
- Load32Disp(rl_array.reg, len_offset, reg_len);
- }
- // reg_ptr -> array data.
- OpRegImm(kOpAdd, reg_ptr, data_offset);
- // At this point, reg_ptr points to array, 2 live temps.
- if ((size == k64) || (size == kDouble)) {
- // TUNING: specific wide routine that can handle fp regs.
- if (scale) {
- RegStorage r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg, scale);
- OpRegReg(kOpAdd, reg_ptr, r_new_index);
- FreeTemp(r_new_index);
- } else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg);
- }
- rl_src = LoadValueWide(rl_src, reg_class);
-
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
-
- StoreBaseDisp(reg_ptr, 0, rl_src.reg, size, kNotVolatile);
- } else {
- rl_src = LoadValue(rl_src, reg_class);
- if (needs_range_check) {
- GenArrayBoundsCheck(rl_index.reg, reg_len);
- FreeTemp(reg_len);
- }
- StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
- }
- if (allocated_reg_ptr_temp) {
- FreeTemp(reg_ptr);
- }
- if (card_mark) {
- MarkGCCard(opt_flags, rl_src.reg, rl_array.reg);
- }
-}
-
-void Mips64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift) {
- OpKind op = kOpBkpt;
- switch (opcode) {
- case Instruction::SHL_LONG:
- case Instruction::SHL_LONG_2ADDR:
- op = kOpLsl;
- break;
- case Instruction::SHR_LONG:
- case Instruction::SHR_LONG_2ADDR:
- op = kOpAsr;
- break;
- case Instruction::USHR_LONG:
- case Instruction::USHR_LONG_2ADDR:
- op = kOpLsr;
- break;
- default:
- LOG(FATAL) << "Unexpected case: " << opcode;
- }
- rl_shift = LoadValue(rl_shift, kCoreReg);
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
- StoreValueWide(rl_dest, rl_result);
-}
-
-void Mips64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift, int flags) {
- UNUSED(flags);
- OpKind op = kOpBkpt;
- // Per spec, we only care about low 6 bits of shift amount.
- int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- if (shift_amount == 0) {
- StoreValueWide(rl_dest, rl_src1);
- return;
- }
-
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- switch (opcode) {
- case Instruction::SHL_LONG:
- case Instruction::SHL_LONG_2ADDR:
- op = kOpLsl;
- break;
- case Instruction::SHR_LONG:
- case Instruction::SHR_LONG_2ADDR:
- op = kOpAsr;
- break;
- case Instruction::USHR_LONG:
- case Instruction::USHR_LONG_2ADDR:
- op = kOpLsr;
- break;
- default:
- LOG(FATAL) << "Unexpected case";
- }
- OpRegRegImm(op, rl_result.reg, rl_src1.reg, shift_amount);
- StoreValueWide(rl_dest, rl_result);
-}
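
Per the Dalvik spec, only the low six bits of a long shift amount are
significant, hence the & 0x3f mask above and the early-out that simply copies
the source when the masked amount is zero. In portable terms:

    #include <cassert>
    #include <cstdint>

    // Long shifts honor only the low 6 bits of the amount, as masked above.
    int64_t ShlLong(int64_t v, int32_t amount) {
      int shift = amount & 0x3f;
      return shift == 0 ? v : v << shift;  // shift 0 degenerates to a copy
    }

    int main() {
      assert(ShlLong(1, 65) == 2);  // 65 & 0x3f == 1
      assert(ShlLong(5, 64) == 5);  // 64 & 0x3f == 0
      return 0;
    }
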
-
-void Mips64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2, int flags) {
- // Default - bail to non-const handler.
- GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
-}
-
-void Mips64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
- rl_src = LoadValue(rl_src, kCoreReg);
- RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- NewLIR3(kMips64Sll, rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0);
- StoreValueWide(rl_dest, rl_result);
-}
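
The single `sll rd, rs, 0` works because MIPS64 defines 32-bit operations to
leave their results sign-extended in the 64-bit register, so a 32-bit shift by
zero is a free sign extension. The C++ equivalent of what that one instruction
achieves:

    #include <cassert>
    #include <cstdint>

    // What kMips64Sll with a shift amount of 0 achieves: sign-extend 32 -> 64.
    int64_t IntToLong(int32_t v) {
      return static_cast<int64_t>(v);
    }

    int main() {
      assert(IntToLong(-1) == INT64_C(-1));
      assert(IntToLong(0x7fffffff) == INT64_C(0x7fffffff));
      return 0;
    }
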
-
-void Mips64Mir2Lir::GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest,
- RegLocation rl_src, RegisterClass reg_class) {
- FlushAllRegs(); // Send everything to home location.
- CallRuntimeHelperRegLocation(trampoline, rl_src, false);
- if (rl_dest.wide) {
- RegLocation rl_result;
- rl_result = GetReturnWide(reg_class);
- StoreValueWide(rl_dest, rl_result);
- } else {
- RegLocation rl_result;
- rl_result = GetReturn(reg_class);
- StoreValue(rl_dest, rl_result);
- }
-}
-
-} // namespace art
diff --git a/compiler/dex/quick/mips64/mips64_lir.h b/compiler/dex/quick/mips64/mips64_lir.h
deleted file mode 100644
index 4a5c5ce3c8..0000000000
--- a/compiler/dex/quick/mips64/mips64_lir.h
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
-#define ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
-
-#include "dex/reg_location.h"
-#include "dex/reg_storage.h"
-
-namespace art {
-
-/*
- * Runtime register conventions.
- *
- * zero is always the value 0
- * at is scratch (normally used as temp reg by assembler)
- * v0, v1 are scratch (normally hold subroutine return values)
- * a0-a7 are scratch (normally hold subroutine arguments)
- * t0-t3, t8 are scratch
- * t9 is scratch (normally used for function calls)
- * s0 (rMIPS_SUSPEND) is reserved [holds suspend-check counter]
- * s1 (rMIPS_SELF) is reserved [holds current &Thread]
- * s2-s7 are callee save (promotion target)
- * k0, k1 are reserved for use by interrupt handlers
- * gp is reserved for global pointer
- * sp is reserved
- * s8 is callee save (promotion target)
- * ra is scratch (normally holds the return addr)
- *
- * Preserved across C calls: s0-s8
- * Trashed across C calls: at, v0-v1, a0-a7, t0-t3, t8-t9, gp, ra
- *
- * Floating point registers
- * NOTE: there are 32 fp registers.
- * f0-f31
- *
- * f0-f31 trashed across C calls
- *
- * For mips64 code use:
- * a0-a7 to hold operands
- * v0-v1 to hold results
- * t0-t3, t8-t9 for temps
- *
- * All jump/branch instructions have a delay slot after them.
- *
- * Stack frame diagram (stack grows down, higher addresses at top):
- *
- * +------------------------+
- * | IN[ins-1] | {Note: resides in caller's frame}
- * | . |
- * | IN[0] |
- * | caller's Method* |
- * +========================+ {Note: start of callee's frame}
- * | spill region | {variable sized - will include lr if non-leaf.}
- * +------------------------+
- * | ...filler word... | {Note: used as 2nd word of V[locals-1] if long}
- * +------------------------+
- * | V[locals-1] |
- * | V[locals-2] |
- * | . |
- * | . |
- * | V[1] |
- * | V[0] |
- * +------------------------+
- * | 0 to 3 words padding |
- * +------------------------+
- * | OUT[outs-1] |
- * | OUT[outs-2] |
- * | . |
- * | OUT[0] |
- * | cur_method* | <<== sp w/ 16-byte alignment
- * +========================+
- */
-
-
-#define rARG0 rA0d
-#define rs_rARG0 rs_rA0d
-#define rARG1 rA1d
-#define rs_rARG1 rs_rA1d
-#define rARG2 rA2d
-#define rs_rARG2 rs_rA2d
-#define rARG3 rA3d
-#define rs_rARG3 rs_rA3d
-#define rARG4 rA4d
-#define rs_rARG4 rs_rA4d
-#define rARG5 rA5d
-#define rs_rARG5 rs_rA5d
-#define rARG6 rA6d
-#define rs_rARG6 rs_rA6d
-#define rARG7 rA7d
-#define rs_rARG7 rs_rA7d
-#define rRESULT0 rV0d
-#define rs_rRESULT0 rs_rV0d
-#define rRESULT1 rV1d
-#define rs_rRESULT1 rs_rV1d
-
-#define rFARG0 rF12
-#define rs_rFARG0 rs_rF12
-#define rFARG1 rF13
-#define rs_rFARG1 rs_rF13
-#define rFARG2 rF14
-#define rs_rFARG2 rs_rF14
-#define rFARG3 rF15
-#define rs_rFARG3 rs_rF15
-#define rFARG4 rF16
-#define rs_rFARG4 rs_rF16
-#define rFARG5 rF17
-#define rs_rFARG5 rs_rF17
-#define rFARG6 rF18
-#define rs_rFARG6 rs_rF18
-#define rFARG7 rF19
-#define rs_rFARG7 rs_rF19
-#define rFRESULT0 rF0
-#define rs_rFRESULT0 rs_rF0
-#define rFRESULT1 rF1
-#define rs_rFRESULT1 rs_rF1
-
-// Regs not used for Mips64.
-#define rMIPS64_LR RegStorage::kInvalidRegVal
-#define rMIPS64_PC RegStorage::kInvalidRegVal
-
-enum Mips64ResourceEncodingPos {
- kMips64GPReg0 = 0,
- kMips64RegSP = 29,
- kMips64RegLR = 31,
- kMips64FPReg0 = 32,
- kMips64FPRegEnd = 64,
- kMips64RegPC = kMips64FPRegEnd,
- kMips64RegEnd = 65,
-};
-
-enum Mips64NativeRegisterPool { // private marker to keep generate-operator-out.py from processing it.
- rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
- rZEROd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
- rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
- rATd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
- rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
- rV0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
- rV1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
- rV1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
- rA0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
- rA0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
- rA1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
- rA1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
- rA2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
- rA2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
- rA3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
- rA3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
- rA4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
- rA4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
- rA5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
- rA5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
- rA6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
- rA6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
- rA7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
- rA7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
- rT0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
- rT0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
- rT1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
- rT1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
- rT2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
- rT2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
- rT3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
- rT3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
- rS0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,
- rS0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 16,
- rS1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 17,
- rS1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 17,
- rS2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 18,
- rS2d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 18,
- rS3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 19,
- rS3d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 19,
- rS4 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 20,
- rS4d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 20,
- rS5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 21,
- rS5d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 21,
- rS6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 22,
- rS6d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 22,
- rS7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 23,
- rS7d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 23,
- rT8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 24,
- rT8d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 24,
- rT9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 25,
- rT9d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 25,
- rK0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 26,
- rK0d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 26,
- rK1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 27,
- rK1d = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 27,
- rGP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 28,
- rGPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 28,
- rSP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 29,
- rSPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 29,
- rFP = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 30,
- rFPd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 30,
- rRA = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 31,
- rRAd = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 31,
-
- rF0 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
- rF1 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
- rF2 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
- rF3 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
- rF4 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
- rF5 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
- rF6 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
- rF7 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
- rF8 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
- rF9 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
- rF10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
- rF11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
- rF12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
- rF13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
- rF14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
- rF15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,
- rF16 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 16,
- rF17 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 17,
- rF18 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 18,
- rF19 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 19,
- rF20 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 20,
- rF21 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 21,
- rF22 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 22,
- rF23 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 23,
- rF24 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 24,
- rF25 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 25,
- rF26 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 26,
- rF27 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 27,
- rF28 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 28,
- rF29 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 29,
- rF30 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 30,
- rF31 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 31,
-
- rD0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
- rD1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
- rD2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
- rD3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
- rD4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
- rD5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
- rD6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
- rD7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
- rD8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
- rD9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
- rD10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
- rD11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
- rD12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
- rD13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
- rD14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
- rD15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,
- rD16 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 16,
- rD17 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 17,
- rD18 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 18,
- rD19 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 19,
- rD20 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 20,
- rD21 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 21,
- rD22 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 22,
- rD23 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 23,
- rD24 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 24,
- rD25 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 25,
- rD26 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 26,
- rD27 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 27,
- rD28 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 28,
- rD29 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 29,
- rD30 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 30,
- rD31 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 31,
-};
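
Each enumerator above packs three disjoint fields into one value: a storage
shape (32- vs 64-bit solo), a register class (core vs floating point), and the
raw register number, which is how rA0 and rA0d can name the two views of one
physical register. A sketch of the scheme with illustrative field values (the
bit patterns below are assumptions, not ART's actual RegStorage constants):

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-ins for the shape/class patterns OR'd together above.
    enum : uint16_t {
      k32BitSolo     = 0x0100,
      k64BitSolo     = 0x0200,
      kFloatingPoint = 0x0400,
    };

    constexpr uint16_t Encode(uint16_t shape, uint16_t klass, uint16_t num) {
      return shape | klass | num;  // disjoint fields, so OR composes them
    }

    int main() {
      constexpr uint16_t a0  = Encode(k32BitSolo, 0, 4);  // 32-bit view of $a0
      constexpr uint16_t a0d = Encode(k64BitSolo, 0, 4);  // 64-bit view of $a0
      assert((a0 & 0xff) == (a0d & 0xff));  // same register number underneath
      return 0;
    }
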
-
-constexpr RegStorage rs_rZERO(RegStorage::kValid | rZERO);
-constexpr RegStorage rs_rZEROd(RegStorage::kValid | rZEROd);
-constexpr RegStorage rs_rAT(RegStorage::kValid | rAT);
-constexpr RegStorage rs_rATd(RegStorage::kValid | rATd);
-constexpr RegStorage rs_rV0(RegStorage::kValid | rV0);
-constexpr RegStorage rs_rV0d(RegStorage::kValid | rV0d);
-constexpr RegStorage rs_rV1(RegStorage::kValid | rV1);
-constexpr RegStorage rs_rV1d(RegStorage::kValid | rV1d);
-constexpr RegStorage rs_rA0(RegStorage::kValid | rA0);
-constexpr RegStorage rs_rA0d(RegStorage::kValid | rA0d);
-constexpr RegStorage rs_rA1(RegStorage::kValid | rA1);
-constexpr RegStorage rs_rA1d(RegStorage::kValid | rA1d);
-constexpr RegStorage rs_rA2(RegStorage::kValid | rA2);
-constexpr RegStorage rs_rA2d(RegStorage::kValid | rA2d);
-constexpr RegStorage rs_rA3(RegStorage::kValid | rA3);
-constexpr RegStorage rs_rA3d(RegStorage::kValid | rA3d);
-constexpr RegStorage rs_rA4(RegStorage::kValid | rA4);
-constexpr RegStorage rs_rA4d(RegStorage::kValid | rA4d);
-constexpr RegStorage rs_rA5(RegStorage::kValid | rA5);
-constexpr RegStorage rs_rA5d(RegStorage::kValid | rA5d);
-constexpr RegStorage rs_rA6(RegStorage::kValid | rA6);
-constexpr RegStorage rs_rA6d(RegStorage::kValid | rA6d);
-constexpr RegStorage rs_rA7(RegStorage::kValid | rA7);
-constexpr RegStorage rs_rA7d(RegStorage::kValid | rA7d);
-constexpr RegStorage rs_rT0(RegStorage::kValid | rT0);
-constexpr RegStorage rs_rT0d(RegStorage::kValid | rT0d);
-constexpr RegStorage rs_rT1(RegStorage::kValid | rT1);
-constexpr RegStorage rs_rT1d(RegStorage::kValid | rT1d);
-constexpr RegStorage rs_rT2(RegStorage::kValid | rT2);
-constexpr RegStorage rs_rT2d(RegStorage::kValid | rT2d);
-constexpr RegStorage rs_rT3(RegStorage::kValid | rT3);
-constexpr RegStorage rs_rT3d(RegStorage::kValid | rT3d);
-constexpr RegStorage rs_rS0(RegStorage::kValid | rS0);
-constexpr RegStorage rs_rS0d(RegStorage::kValid | rS0d);
-constexpr RegStorage rs_rS1(RegStorage::kValid | rS1);
-constexpr RegStorage rs_rS1d(RegStorage::kValid | rS1d);
-constexpr RegStorage rs_rS2(RegStorage::kValid | rS2);
-constexpr RegStorage rs_rS2d(RegStorage::kValid | rS2d);
-constexpr RegStorage rs_rS3(RegStorage::kValid | rS3);
-constexpr RegStorage rs_rS3d(RegStorage::kValid | rS3d);
-constexpr RegStorage rs_rS4(RegStorage::kValid | rS4);
-constexpr RegStorage rs_rS4d(RegStorage::kValid | rS4d);
-constexpr RegStorage rs_rS5(RegStorage::kValid | rS5);
-constexpr RegStorage rs_rS5d(RegStorage::kValid | rS5d);
-constexpr RegStorage rs_rS6(RegStorage::kValid | rS6);
-constexpr RegStorage rs_rS6d(RegStorage::kValid | rS6d);
-constexpr RegStorage rs_rS7(RegStorage::kValid | rS7);
-constexpr RegStorage rs_rS7d(RegStorage::kValid | rS7d);
-constexpr RegStorage rs_rT8(RegStorage::kValid | rT8);
-constexpr RegStorage rs_rT8d(RegStorage::kValid | rT8d);
-constexpr RegStorage rs_rT9(RegStorage::kValid | rT9);
-constexpr RegStorage rs_rT9d(RegStorage::kValid | rT9d);
-constexpr RegStorage rs_rK0(RegStorage::kValid | rK0);
-constexpr RegStorage rs_rK0d(RegStorage::kValid | rK0d);
-constexpr RegStorage rs_rK1(RegStorage::kValid | rK1);
-constexpr RegStorage rs_rK1d(RegStorage::kValid | rK1d);
-constexpr RegStorage rs_rGP(RegStorage::kValid | rGP);
-constexpr RegStorage rs_rGPd(RegStorage::kValid | rGPd);
-constexpr RegStorage rs_rSP(RegStorage::kValid | rSP);
-constexpr RegStorage rs_rSPd(RegStorage::kValid | rSPd);
-constexpr RegStorage rs_rFP(RegStorage::kValid | rFP);
-constexpr RegStorage rs_rFPd(RegStorage::kValid | rFPd);
-constexpr RegStorage rs_rRA(RegStorage::kValid | rRA);
-constexpr RegStorage rs_rRAd(RegStorage::kValid | rRAd);
-
-constexpr RegStorage rs_rMIPS64_LR(RegStorage::kInvalid); // Not used for MIPS64.
-constexpr RegStorage rs_rMIPS64_PC(RegStorage::kInvalid); // Not used for MIPS64.
-constexpr RegStorage rs_rMIPS64_COUNT(RegStorage::kInvalid); // Not used for MIPS64.
-
-constexpr RegStorage rs_rF0(RegStorage::kValid | rF0);
-constexpr RegStorage rs_rF1(RegStorage::kValid | rF1);
-constexpr RegStorage rs_rF2(RegStorage::kValid | rF2);
-constexpr RegStorage rs_rF3(RegStorage::kValid | rF3);
-constexpr RegStorage rs_rF4(RegStorage::kValid | rF4);
-constexpr RegStorage rs_rF5(RegStorage::kValid | rF5);
-constexpr RegStorage rs_rF6(RegStorage::kValid | rF6);
-constexpr RegStorage rs_rF7(RegStorage::kValid | rF7);
-constexpr RegStorage rs_rF8(RegStorage::kValid | rF8);
-constexpr RegStorage rs_rF9(RegStorage::kValid | rF9);
-constexpr RegStorage rs_rF10(RegStorage::kValid | rF10);
-constexpr RegStorage rs_rF11(RegStorage::kValid | rF11);
-constexpr RegStorage rs_rF12(RegStorage::kValid | rF12);
-constexpr RegStorage rs_rF13(RegStorage::kValid | rF13);
-constexpr RegStorage rs_rF14(RegStorage::kValid | rF14);
-constexpr RegStorage rs_rF15(RegStorage::kValid | rF15);
-constexpr RegStorage rs_rF16(RegStorage::kValid | rF16);
-constexpr RegStorage rs_rF17(RegStorage::kValid | rF17);
-constexpr RegStorage rs_rF18(RegStorage::kValid | rF18);
-constexpr RegStorage rs_rF19(RegStorage::kValid | rF19);
-constexpr RegStorage rs_rF20(RegStorage::kValid | rF20);
-constexpr RegStorage rs_rF21(RegStorage::kValid | rF21);
-constexpr RegStorage rs_rF22(RegStorage::kValid | rF22);
-constexpr RegStorage rs_rF23(RegStorage::kValid | rF23);
-constexpr RegStorage rs_rF24(RegStorage::kValid | rF24);
-constexpr RegStorage rs_rF25(RegStorage::kValid | rF25);
-constexpr RegStorage rs_rF26(RegStorage::kValid | rF26);
-constexpr RegStorage rs_rF27(RegStorage::kValid | rF27);
-constexpr RegStorage rs_rF28(RegStorage::kValid | rF28);
-constexpr RegStorage rs_rF29(RegStorage::kValid | rF29);
-constexpr RegStorage rs_rF30(RegStorage::kValid | rF30);
-constexpr RegStorage rs_rF31(RegStorage::kValid | rF31);
-
-constexpr RegStorage rs_rD0(RegStorage::kValid | rD0);
-constexpr RegStorage rs_rD1(RegStorage::kValid | rD1);
-constexpr RegStorage rs_rD2(RegStorage::kValid | rD2);
-constexpr RegStorage rs_rD3(RegStorage::kValid | rD3);
-constexpr RegStorage rs_rD4(RegStorage::kValid | rD4);
-constexpr RegStorage rs_rD5(RegStorage::kValid | rD5);
-constexpr RegStorage rs_rD6(RegStorage::kValid | rD6);
-constexpr RegStorage rs_rD7(RegStorage::kValid | rD7);
-constexpr RegStorage rs_rD8(RegStorage::kValid | rD8);
-constexpr RegStorage rs_rD9(RegStorage::kValid | rD9);
-constexpr RegStorage rs_rD10(RegStorage::kValid | rD10);
-constexpr RegStorage rs_rD11(RegStorage::kValid | rD11);
-constexpr RegStorage rs_rD12(RegStorage::kValid | rD12);
-constexpr RegStorage rs_rD13(RegStorage::kValid | rD13);
-constexpr RegStorage rs_rD14(RegStorage::kValid | rD14);
-constexpr RegStorage rs_rD15(RegStorage::kValid | rD15);
-constexpr RegStorage rs_rD16(RegStorage::kValid | rD16);
-constexpr RegStorage rs_rD17(RegStorage::kValid | rD17);
-constexpr RegStorage rs_rD18(RegStorage::kValid | rD18);
-constexpr RegStorage rs_rD19(RegStorage::kValid | rD19);
-constexpr RegStorage rs_rD20(RegStorage::kValid | rD20);
-constexpr RegStorage rs_rD21(RegStorage::kValid | rD21);
-constexpr RegStorage rs_rD22(RegStorage::kValid | rD22);
-constexpr RegStorage rs_rD23(RegStorage::kValid | rD23);
-constexpr RegStorage rs_rD24(RegStorage::kValid | rD24);
-constexpr RegStorage rs_rD25(RegStorage::kValid | rD25);
-constexpr RegStorage rs_rD26(RegStorage::kValid | rD26);
-constexpr RegStorage rs_rD27(RegStorage::kValid | rD27);
-constexpr RegStorage rs_rD28(RegStorage::kValid | rD28);
-constexpr RegStorage rs_rD29(RegStorage::kValid | rD29);
-constexpr RegStorage rs_rD30(RegStorage::kValid | rD30);
-constexpr RegStorage rs_rD31(RegStorage::kValid | rD31);
-
-// TODO: reduce/eliminate use of these.
-#define rMIPS64_SUSPEND rS0d
-#define rs_rMIPS64_SUSPEND rs_rS0d
-#define rMIPS64_SELF rS1d
-#define rs_rMIPS64_SELF rs_rS1d
-#define rMIPS64_SP rSPd
-#define rs_rMIPS64_SP rs_rSPd
-#define rMIPS64_ARG0 rARG0
-#define rs_rMIPS64_ARG0 rs_rARG0
-#define rMIPS64_ARG1 rARG1
-#define rs_rMIPS64_ARG1 rs_rARG1
-#define rMIPS64_ARG2 rARG2
-#define rs_rMIPS64_ARG2 rs_rARG2
-#define rMIPS64_ARG3 rARG3
-#define rs_rMIPS64_ARG3 rs_rARG3
-#define rMIPS64_ARG4 rARG4
-#define rs_rMIPS64_ARG4 rs_rARG4
-#define rMIPS64_ARG5 rARG5
-#define rs_rMIPS64_ARG5 rs_rARG5
-#define rMIPS64_ARG6 rARG6
-#define rs_rMIPS64_ARG6 rs_rARG6
-#define rMIPS64_ARG7 rARG7
-#define rs_rMIPS64_ARG7 rs_rARG7
-#define rMIPS64_FARG0 rFARG0
-#define rs_rMIPS64_FARG0 rs_rFARG0
-#define rMIPS64_FARG1 rFARG1
-#define rs_rMIPS64_FARG1 rs_rFARG1
-#define rMIPS64_FARG2 rFARG2
-#define rs_rMIPS64_FARG2 rs_rFARG2
-#define rMIPS64_FARG3 rFARG3
-#define rs_rMIPS64_FARG3 rs_rFARG3
-#define rMIPS64_FARG4 rFARG4
-#define rs_rMIPS64_FARG4 rs_rFARG4
-#define rMIPS64_FARG5 rFARG5
-#define rs_rMIPS64_FARG5 rs_rFARG5
-#define rMIPS64_FARG6 rFARG6
-#define rs_rMIPS64_FARG6 rs_rFARG6
-#define rMIPS64_FARG7 rFARG7
-#define rs_rMIPS64_FARG7 rs_rFARG7
-#define rMIPS64_RET0 rRESULT0
-#define rs_rMIPS64_RET0 rs_rRESULT0
-#define rMIPS64_RET1 rRESULT1
-#define rs_rMIPS64_RET1 rs_rRESULT1
-#define rMIPS64_INVOKE_TGT rT9d
-#define rs_rMIPS64_INVOKE_TGT rs_rT9d
-#define rMIPS64_COUNT RegStorage::kInvalidRegVal
-
-// RegisterLocation templates return values (r_V0).
-const RegLocation mips64_loc_c_return
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, rV0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_ref
- {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
- RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_wide
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitSolo, rV0d), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_float
- {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, rF0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips64_loc_c_return_double
- {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitSolo, rD0), INVALID_SREG, INVALID_SREG};
-
-enum Mips64ShiftEncodings {
- kMips64Lsl = 0x0,
- kMips64Lsr = 0x1,
- kMips64Asr = 0x2,
- kMips64Ror = 0x3
-};
-
-// MIPS64 sync kinds (Note: support for kinds other than kSYNC0 may not exist).
-#define kSYNC0 0x00
-#define kSYNC_WMB 0x04
-#define kSYNC_MB 0x01
-#define kSYNC_ACQUIRE 0x11
-#define kSYNC_RELEASE 0x12
-#define kSYNC_RMB 0x13
-
-// TODO: Use smaller hammer when appropriate for target CPU.
-#define kST kSYNC0
-#define kSY kSYNC0
-
-/*
- * The following enum defines the list of Mips64 instructions supported by the
- * assembler. Their corresponding EncodingMap positions will be defined in
- * assemble_mips64.cc.
- */
-enum Mips64OpCode {
- kMips64First = 0,
- kMips6432BitData = kMips64First, // data [31..0].
- kMips64Addiu, // addiu t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMips64Addu, // add d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100001].
- kMips64And, // and d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100100].
- kMips64Andi, // andi t,s,imm16 [001100] s[25..21] t[20..16] imm16[15..0].
- kMips64B, // b o [0001000000000000] o[15..0].
- kMips64Bal, // bal o [0000010000010001] o[15..0].
- // NOTE: the code tests the range kMips64Beq thru kMips64Bne, so adding an instruction in this
- // range may require updates.
- kMips64Beq, // beq s,t,o [000100] s[25..21] t[20..16] o[15..0].
- kMips64Beqz, // beqz s,o [000100] s[25..21] [00000] o[15..0].
- kMips64Bgez, // bgez s,o [000001] s[25..21] [00001] o[15..0].
- kMips64Bgtz, // bgtz s,o [000111] s[25..21] [00000] o[15..0].
- kMips64Blez, // blez s,o [000110] s[25..21] [00000] o[15..0].
- kMips64Bltz, // bltz s,o [000001] s[25..21] [00000] o[15..0].
- kMips64Bnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
- kMips64Bne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
- kMips64Break, // break code [000000] code[25..6] [001101].
- kMips64Daddiu, // daddiu t,s,imm16 [011001] s[25..21] t[20..16] imm16[15..0].
- kMips64Daddu, // daddu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101101].
- kMips64Dahi, // dahi s,imm16 [000001] s[25..21] [00110] imm16[15..0].
- kMips64Dati, // dati s,imm16 [000001] s[25..21] [11110] imm16[15..0].
- kMips64Daui, // daui t,s,imm16 [011101] s[25..21] t[20..16] imm16[15..0].
- kMips64Ddiv, // ddiv d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011110].
- kMips64Div, // div d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011010].
- kMips64Dmod, // dmod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011110].
- kMips64Dmul, // dmul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011100].
- kMips64Dmfc1, // dmfc1 t,s [01000100001] t[20..16] s[15..11] [00000000000].
- kMips64Dmtc1, // dmtc1 t,s [01000100101] t[20..16] s[15..11] [00000000000].
- kMips64Drotr32, // drotr32 d,t,a [00000000001] t[20..16] d[15..11] a[10..6] [111110].
- kMips64Dsll, // dsll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111000].
- kMips64Dsll32, // dsll32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111100].
- kMips64Dsrl, // dsrl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111010].
- kMips64Dsrl32, // dsrl32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111110].
- kMips64Dsra, // dsra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111011].
- kMips64Dsra32, // dsra32 d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [111111].
- kMips64Dsllv, // dsllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010100].
- kMips64Dsrlv, // dsrlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010110].
- kMips64Dsrav, // dsrav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000010111].
- kMips64Dsubu, // dsubu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101111].
- kMips64Ext, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
- kMips64Faddd, // add.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000000].
- kMips64Fadds, // add.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000000].
- kMips64Fdivd, // div.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000011].
- kMips64Fdivs, // div.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000011].
- kMips64Fmuld, // mul.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000010].
- kMips64Fmuls, // mul.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000010].
- kMips64Fsubd, // sub.d d,s,t [01000110001] t[20..16] s[15..11] d[10..6] [000001].
- kMips64Fsubs, // sub.s d,s,t [01000110000] t[20..16] s[15..11] d[10..6] [000001].
- kMips64Fcvtsd, // cvt.s.d d,s [01000110001] [00000] s[15..11] d[10..6] [100000].
- kMips64Fcvtsw, // cvt.s.w d,s [01000110100] [00000] s[15..11] d[10..6] [100000].
- kMips64Fcvtds, // cvt.d.s d,s [01000110000] [00000] s[15..11] d[10..6] [100001].
- kMips64Fcvtdw, // cvt.d.w d,s [01000110100] [00000] s[15..11] d[10..6] [100001].
- kMips64Fcvtws, // cvt.w.s d,s [01000110000] [00000] s[15..11] d[10..6] [100100].
- kMips64Fcvtwd, // cvt.w.d d,s [01000110001] [00000] s[15..11] d[10..6] [100100].
- kMips64Fmovd, // mov.d d,s [01000110001] [00000] s[15..11] d[10..6] [000110].
- kMips64Fmovs, // mov.s d,s [01000110000] [00000] s[15..11] d[10..6] [000110].
- kMips64Fnegd, // neg.d d,s [01000110001] [00000] s[15..11] d[10..6] [000111].
- kMips64Fnegs, // neg.s d,s [01000110000] [00000] s[15..11] d[10..6] [000111].
- kMips64Fldc1, // ldc1 t,o(b) [110101] b[25..21] t[20..16] o[15..0].
- kMips64Flwc1, // lwc1 t,o(b) [110001] b[25..21] t[20..16] o[15..0].
- kMips64Fsdc1, // sdc1 t,o(b) [111101] b[25..21] t[20..16] o[15..0].
- kMips64Fswc1, // swc1 t,o(b) [111001] b[25..21] t[20..16] o[15..0].
- kMips64Jal, // jal t [000011] t[25..0].
- kMips64Jalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
- kMips64Lahi, // lui t,imm16 [00111100000] t[20..16] imm16[15..0] load addr hi.
- kMips64Lalo, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0] load addr lo.
- kMips64Lb, // lb t,o(b) [100000] b[25..21] t[20..16] o[15..0].
- kMips64Lbu, // lbu t,o(b) [100100] b[25..21] t[20..16] o[15..0].
- kMips64Ld, // ld t,o(b) [110111] b[25..21] t[20..16] o[15..0].
- kMips64Lh, // lh t,o(b) [100001] b[25..21] t[20..16] o[15..0].
- kMips64Lhu, // lhu t,o(b) [100101] b[25..21] t[20..16] o[15..0].
- kMips64Lui, // lui t,imm16 [00111100000] t[20..16] imm16[15..0].
- kMips64Lw, // lw t,o(b) [100011] b[25..21] t[20..16] o[15..0].
- kMips64Lwu, // lwu t,o(b) [100111] b[25..21] t[20..16] o[15..0].
- kMips64Mfc1, // mfc1 t,s [01000100000] t[20..16] s[15..11] [00000000000].
- kMips64Mtc1, // mtc1 t,s [01000100100] t[20..16] s[15..11] [00000000000].
- kMips64Move, // move d,s [000000] s[25..21] [00000] d[15..11] [00000101101].
- kMips64Mod, // mod d,s,t [000000] s[25..21] t[20..16] d[15..11] [00011011010].
- kMips64Mul, // mul d,s,t [000000] s[25..21] t[20..16] d[15..11] [00010011000].
- kMips64Nop, // nop [00000000000000000000000000000000].
- kMips64Nor, // nor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100111].
- kMips64Or, // or d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100101].
- kMips64Ori, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
- kMips64Sb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
- kMips64Sd, // sd t,o(b) [111111] b[25..21] t[20..16] o[15..0].
- kMips64Seb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
- kMips64Seh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
- kMips64Sh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
- kMips64Sll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
- kMips64Sllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
- kMips64Slt, // slt d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101010].
- kMips64Slti, // slti t,s,imm16 [001010] s[25..21] t[20..16] imm16[15..0].
- kMips64Sltu, // sltu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000101011].
- kMips64Sra, // sra d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000011].
- kMips64Srav, // srav d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000111].
- kMips64Srl, // srl d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000010].
- kMips64Srlv, // srlv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000110].
- kMips64Subu, // subu d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100011].
- kMips64Sw, // sw t,o(b) [101011] b[25..21] t[20..16] o[15..0].
- kMips64Sync, // sync kind [000000] [0000000000000000] s[10..6] [001111].
- kMips64Xor, // xor d,s,t [000000] s[25..21] t[20..16] d[15..11] [00000100110].
- kMips64Xori, // xori t,s,imm16 [001110] s[25..21] t[20..16] imm16[15..0].
- kMips64CurrPC, // jal to .+8 to materialize pc.
- kMips64Delta, // Pseudo for ori t, s, <label>-<label>.
- kMips64DeltaHi, // Pseudo for lui t, high16(<label>-<label>).
- kMips64DeltaLo, // Pseudo for ori t, s, low16(<label>-<label>).
- kMips64Undefined, // undefined [011001xxxxxxxxxxxxxxxx].
- kMips64Last
-};
-std::ostream& operator<<(std::ostream& os, const Mips64OpCode& rhs);
-
-// Instruction assembly field_loc kind.
-enum Mips64EncodingKind {
- kFmtUnused,
- kFmtBitBlt, // Bit string using end/start.
- kFmtDfp, // Double FP reg.
- kFmtSfp, // Single FP reg.
- kFmtBlt5_2, // Same 5-bit field to 2 locations.
-};
-std::ostream& operator<<(std::ostream& os, const Mips64EncodingKind& rhs);
-
-// Struct used to define the snippet positions for each MIPS64 opcode.
-struct Mips64EncodingMap {
- uint32_t skeleton;
- struct {
- Mips64EncodingKind kind;
- int end; // end for kFmtBitBlt, 1-bit slice end for FP regs.
- int start; // start for kFmtBitBlt, 4-bit slice end for FP regs.
- } field_loc[4];
- Mips64OpCode opcode;
- uint64_t flags;
- const char *name;
- const char* fmt;
- int size; // Note: size is in bytes.
-};
-
-extern Mips64EncodingMap EncodingMap[kMips64Last];
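
Each EncodingMap entry pairs a fixed skeleton word with field descriptors
saying where operands are spliced in. A hypothetical, simplified entry for
addiu (the real table in assemble_mips64.cc has a richer layout; this only
illustrates the skeleton-plus-bit-slices idea):

    #include <cstdint>

    struct FieldLoc { int end; int start; };  // bit slice [end..start]
    struct SimpleEntry {
      uint32_t skeleton;                      // fixed opcode bits
      FieldLoc fields[3];                     // operand positions
      const char* name;
      int size;                               // bytes
    };

    // addiu t,s,imm16: opcode 001001 in [31..26], s [25..21], t [20..16], imm [15..0].
    constexpr SimpleEntry kAddiu = {
      0x24000000u,                    // 001001 << 26
      {{20, 16}, {25, 21}, {15, 0}},  // t, s, imm16
      "addiu",
      4,
    };

    int main() { return kAddiu.skeleton == 0x24000000u ? 0 : 1; }
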
-
-#define IS_UIMM16(v) ((0 <= (v)) && ((v) <= 65535))
-#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32766))
-#define IS_SIMM16_2WORD(v) ((-32764 <= (v)) && ((v) <= 32763)) // 2 offsets must fit.
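
These range predicates gate instruction selection: an immediate that fits the
16-bit field can go straight into an addiu/ori encoding, while anything larger
must first be materialized (classically with lui + ori). Note that IS_SIMM16
stops at 32766 rather than 32767, making the check slightly conservative. As
plain functions:

    #include <cassert>
    #include <cstdint>

    // Mirrors IS_UIMM16 / IS_SIMM16 above.
    bool FitsUimm16(int64_t v) { return 0 <= v && v <= 65535; }
    bool FitsSimm16(int64_t v) { return -32768 <= v && v <= 32766; }

    int main() {
      assert(FitsSimm16(-32768) && !FitsSimm16(40000));  // 40000 needs lui/ori
      assert(FitsUimm16(65535) && !FitsUimm16(-1));
      return 0;
    }
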
-
-} // namespace art
-
-#endif // ART_COMPILER_DEX_QUICK_MIPS64_MIPS64_LIR_H_
diff --git a/compiler/dex/quick/mips64/target_mips64.cc b/compiler/dex/quick/mips64/target_mips64.cc
deleted file mode 100644
index 6ed9617bde..0000000000
--- a/compiler/dex/quick/mips64/target_mips64.cc
+++ /dev/null
@@ -1,653 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include <inttypes.h>
-
-#include <string>
-
-#include "arch/mips64/instruction_set_features_mips64.h"
-#include "backend_mips64.h"
-#include "base/logging.h"
-#include "dex/compiler_ir.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "driver/compiler_driver.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-static constexpr RegStorage core_regs_arr32[] =
- {rs_rZERO, rs_rAT, rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6,
- rs_rA7, rs_rT0, rs_rT1, rs_rT2, rs_rT3, rs_rS0, rs_rS1, rs_rS2, rs_rS3, rs_rS4, rs_rS5,
- rs_rS6, rs_rS7, rs_rT8, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rFP, rs_rRA};
-static constexpr RegStorage core_regs_arr64[] =
- {rs_rZEROd, rs_rATd, rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d,
- rs_rA6d, rs_rA7d, rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rS0d, rs_rS1d, rs_rS2d, rs_rS3d,
- rs_rS4d, rs_rS5d, rs_rS6d, rs_rS7d, rs_rT8d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd,
- rs_rFPd, rs_rRAd};
-#if 0
-// TODO: f24-f31 must be saved before calls and restored after.
-static constexpr RegStorage sp_regs_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
- rs_rF31};
-static constexpr RegStorage dp_regs_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
- rs_rD31};
-#else
-static constexpr RegStorage sp_regs_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23};
-static constexpr RegStorage dp_regs_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23};
-#endif
-static constexpr RegStorage reserved_regs_arr32[] =
- {rs_rZERO, rs_rAT, rs_rS0, rs_rS1, rs_rT9, rs_rK0, rs_rK1, rs_rGP, rs_rSP, rs_rRA};
-static constexpr RegStorage reserved_regs_arr64[] =
- {rs_rZEROd, rs_rATd, rs_rS0d, rs_rS1d, rs_rT9d, rs_rK0d, rs_rK1d, rs_rGPd, rs_rSPd, rs_rRAd};
-static constexpr RegStorage core_temps_arr32[] =
- {rs_rV0, rs_rV1, rs_rA0, rs_rA1, rs_rA2, rs_rA3, rs_rA4, rs_rA5, rs_rA6, rs_rA7, rs_rT0,
- rs_rT1, rs_rT2, rs_rT3, rs_rT8};
-static constexpr RegStorage core_temps_arr64[] =
- {rs_rV0d, rs_rV1d, rs_rA0d, rs_rA1d, rs_rA2d, rs_rA3d, rs_rA4d, rs_rA5d, rs_rA6d, rs_rA7d,
- rs_rT0d, rs_rT1d, rs_rT2d, rs_rT3d, rs_rT8d};
-#if 0
-// TODO: f24-f31 must be saved before calls and restored after.
-static constexpr RegStorage sp_temps_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23, rs_rF24, rs_rF25, rs_rF26, rs_rF27, rs_rF28, rs_rF29, rs_rF30,
- rs_rF31};
-static constexpr RegStorage dp_temps_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23, rs_rD24, rs_rD25, rs_rD26, rs_rD27, rs_rD28, rs_rD29, rs_rD30,
- rs_rD31};
-#else
-static constexpr RegStorage sp_temps_arr[] =
- {rs_rF0, rs_rF1, rs_rF2, rs_rF3, rs_rF4, rs_rF5, rs_rF6, rs_rF7, rs_rF8, rs_rF9, rs_rF10,
- rs_rF11, rs_rF12, rs_rF13, rs_rF14, rs_rF15, rs_rF16, rs_rF17, rs_rF18, rs_rF19, rs_rF20,
- rs_rF21, rs_rF22, rs_rF23};
-static constexpr RegStorage dp_temps_arr[] =
- {rs_rD0, rs_rD1, rs_rD2, rs_rD3, rs_rD4, rs_rD5, rs_rD6, rs_rD7, rs_rD8, rs_rD9, rs_rD10,
- rs_rD11, rs_rD12, rs_rD13, rs_rD14, rs_rD15, rs_rD16, rs_rD17, rs_rD18, rs_rD19, rs_rD20,
- rs_rD21, rs_rD22, rs_rD23};
-#endif
-
-static constexpr ArrayRef<const RegStorage> empty_pool;
-static constexpr ArrayRef<const RegStorage> core_regs32(core_regs_arr32);
-static constexpr ArrayRef<const RegStorage> core_regs64(core_regs_arr64);
-static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
-static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
-static constexpr ArrayRef<const RegStorage> reserved_regs32(reserved_regs_arr32);
-static constexpr ArrayRef<const RegStorage> reserved_regs64(reserved_regs_arr64);
-static constexpr ArrayRef<const RegStorage> core_temps32(core_temps_arr32);
-static constexpr ArrayRef<const RegStorage> core_temps64(core_temps_arr64);
-static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
-static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
-
-RegLocation Mips64Mir2Lir::LocCReturn() {
- return mips64_loc_c_return;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnRef() {
- return mips64_loc_c_return_ref;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnWide() {
- return mips64_loc_c_return_wide;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnFloat() {
- return mips64_loc_c_return_float;
-}
-
-RegLocation Mips64Mir2Lir::LocCReturnDouble() {
- return mips64_loc_c_return_double;
-}
-
-// Return a target-dependent special register.
-RegStorage Mips64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
- RegStorage res_reg;
- switch (reg) {
- case kSelf: res_reg = rs_rS1; break;
- case kSuspend: res_reg = rs_rS0; break;
- case kLr: res_reg = rs_rRA; break;
- case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_rSP; break;
- case kArg0: res_reg = rs_rA0; break;
- case kArg1: res_reg = rs_rA1; break;
- case kArg2: res_reg = rs_rA2; break;
- case kArg3: res_reg = rs_rA3; break;
- case kArg4: res_reg = rs_rA4; break;
- case kArg5: res_reg = rs_rA5; break;
- case kArg6: res_reg = rs_rA6; break;
- case kArg7: res_reg = rs_rA7; break;
- case kFArg0: res_reg = rs_rF12; break;
- case kFArg1: res_reg = rs_rF13; break;
- case kFArg2: res_reg = rs_rF14; break;
- case kFArg3: res_reg = rs_rF15; break;
- case kFArg4: res_reg = rs_rF16; break;
- case kFArg5: res_reg = rs_rF17; break;
- case kFArg6: res_reg = rs_rF18; break;
- case kFArg7: res_reg = rs_rF19; break;
- case kRet0: res_reg = rs_rV0; break;
- case kRet1: res_reg = rs_rV1; break;
- case kInvokeTgt: res_reg = rs_rT9; break;
- case kHiddenArg: res_reg = rs_rT0; break;
- case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
- case kCount: res_reg = RegStorage::InvalidReg(); break;
- default: res_reg = RegStorage::InvalidReg();
- }
- return res_reg;
-}
-
-RegStorage Mips64Mir2Lir::InToRegStorageMips64Mapper::GetNextReg(ShortyArg arg) {
- const SpecialTargetRegister coreArgMappingToPhysicalReg[] =
- {kArg1, kArg2, kArg3, kArg4, kArg5, kArg6, kArg7};
- const size_t coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
- const SpecialTargetRegister fpArgMappingToPhysicalReg[] =
- {kFArg1, kFArg2, kFArg3, kFArg4, kFArg5, kFArg6, kFArg7};
- const size_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
-
- RegStorage result = RegStorage::InvalidReg();
- if (arg.IsFP()) {
- if (cur_arg_reg_ < fpArgMappingToPhysicalRegSize) {
- DCHECK(!arg.IsRef());
- result = m2l_->TargetReg(fpArgMappingToPhysicalReg[cur_arg_reg_++],
- arg.IsWide() ? kWide : kNotWide);
- }
- } else {
- if (cur_arg_reg_ < coreArgMappingToPhysicalRegSize) {
- DCHECK(!(arg.IsWide() && arg.IsRef()));
- result = m2l_->TargetReg(coreArgMappingToPhysicalReg[cur_arg_reg_++],
- arg.IsRef() ? kRef : (arg.IsWide() ? kWide : kNotWide));
- }
- }
- return result;
-}
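
Note the single cur_arg_reg_ counter shared by both tables: argument registers
are assigned by position, not by class, so a float in slot 1 takes kFArg2
while an int in the same slot would take kArg2. A sketch of that policy, with
register names taken from the mapping tables above:

    #include <cassert>
    #include <string>
    #include <vector>

    // One shared position counter; the class only selects which table is read.
    std::vector<std::string> AssignArgRegs(const std::string& shorty_args) {
      static const char* core[] = {"a1", "a2", "a3", "a4", "a5", "a6", "a7"};
      static const char* fp[] = {"f13", "f14", "f15", "f16", "f17", "f18", "f19"};
      std::vector<std::string> out;
      size_t pos = 0;
      for (char c : shorty_args) {
        bool is_fp = (c == 'F' || c == 'D');
        out.push_back(pos < 7 ? (is_fp ? fp[pos] : core[pos]) : "stack");
        ++pos;  // advances for every argument, like cur_arg_reg_++
      }
      return out;
    }

    int main() {
      auto regs = AssignArgRegs("IFJ");  // int, float, long
      assert(regs[0] == "a1" && regs[1] == "f14" && regs[2] == "a3");
      return 0;
    }
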
-
-/*
- * Decode the register id.
- */
-ResourceMask Mips64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
- return ResourceMask::Bit((reg.IsFloat() ? kMips64FPReg0 : 0) + reg.GetRegNum());
-}
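
The resource mask gives every register a unique bit: core registers occupy
bits 0-31 and floating-point registers are shifted up by kMips64FPReg0 (32),
matching the Mips64ResourceEncodingPos enum in mips64_lir.h. A tiny sketch:

    #include <cassert>

    constexpr int kFpReg0 = 32;  // kMips64FPReg0

    // Bit index of a register in the use/def resource masks.
    int ResourceBit(bool is_float, int reg_num) {
      return (is_float ? kFpReg0 : 0) + reg_num;
    }

    int main() {
      assert(ResourceBit(false, 29) == 29);  // $sp -> kMips64RegSP
      assert(ResourceBit(true, 0) == 32);    // $f0 -> first FP bit
      return 0;
    }
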
-
-ResourceMask Mips64Mir2Lir::GetPCUseDefEncoding() const {
- return ResourceMask::Bit(kMips64RegPC);
-}
-
-
-void Mips64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags, ResourceMask* use_mask,
- ResourceMask* def_mask) {
- DCHECK(!lir->flags.use_def_invalid);
-
- // Mips64-specific resource map setup here.
- if (flags & REG_DEF_SP) {
- def_mask->SetBit(kMips64RegSP);
- }
-
- if (flags & REG_USE_SP) {
- use_mask->SetBit(kMips64RegSP);
- }
-
- if (flags & REG_DEF_LR) {
- def_mask->SetBit(kMips64RegLR);
- }
-}
-
-/* For dumping instructions */
-#define MIPS64_REG_COUNT 32
-static const char *mips64_reg_name[MIPS64_REG_COUNT] = {
- "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
- "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
-};
-
-/*
- * Interpret a format string and build a human-readable instruction string.
- * See format key in assemble_mips64.cc.
- */
-std::string Mips64Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
- std::string buf;
- int i;
- const char *fmt_end = &fmt[strlen(fmt)];
- char tbuf[256];
- char nc;
- while (fmt < fmt_end) {
- int operand;
- if (*fmt == '!') {
- fmt++;
- DCHECK_LT(fmt, fmt_end);
- nc = *fmt++;
- if (nc == '!') {
- strcpy(tbuf, "!");
- } else {
- DCHECK_LT(fmt, fmt_end);
- DCHECK_LT(static_cast<unsigned>(nc-'0'), 4u);
- operand = lir->operands[nc-'0'];
- switch (*fmt++) {
- case 'b':
- strcpy(tbuf, "0000");
- for (i = 3; i >= 0; i--) {
- tbuf[i] += operand & 1;
- operand >>= 1;
- }
- break;
- case 's':
- snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
- break;
- case 'S':
- DCHECK_EQ(RegStorage::RegNum(operand) & 1, 0);
- snprintf(tbuf, arraysize(tbuf), "$f%d", RegStorage::RegNum(operand));
- break;
- case 'h':
- snprintf(tbuf, arraysize(tbuf), "%04x", operand);
- break;
- case 'M':
- case 'd':
- snprintf(tbuf, arraysize(tbuf), "%d", operand);
- break;
- case 'D':
- snprintf(tbuf, arraysize(tbuf), "%d", operand+1);
- break;
- case 'E':
- snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
- break;
- case 'F':
- snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
- break;
- case 't':
- snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
- reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
- lir->target);
- break;
- case 'T':
- snprintf(tbuf, arraysize(tbuf), "0x%08x", operand << 2);
- break;
- case 'u': {
- int offset_1 = lir->operands[0];
- int offset_2 = NEXT_LIR(lir)->operands[0];
- uintptr_t target =
- (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) & ~3) +
- (offset_1 << 21 >> 9) + (offset_2 << 1)) & 0xfffffffc;
- snprintf(tbuf, arraysize(tbuf), "%p", reinterpret_cast<void*>(target));
- break;
- }
-
- /* Nothing to print for BLX_2 */
- case 'v':
- strcpy(tbuf, "see above");
- break;
- case 'r':
- DCHECK(operand >= 0 && operand < MIPS64_REG_COUNT);
- strcpy(tbuf, mips64_reg_name[operand]);
- break;
- case 'N':
- // Placeholder for delay slot handling
- strcpy(tbuf, "; nop");
- break;
- default:
- strcpy(tbuf, "DecodeError");
- break;
- }
- buf += tbuf;
- }
- } else {
- buf += *fmt++;
- }
- }
- return buf;
-}
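
The disassembly format strings use '!' escapes: '!' followed by an operand
digit and a key letter, with "!!" printing a literal '!'. A minimal sketch of
the parsing loop, handling only the 'd' (decimal) key:

    #include <cassert>
    #include <string>

    // Expand "!<operand-digit><key>" escapes; only the 'd' key is shown.
    std::string Expand(const char* fmt, const int* operands) {
      std::string out;
      while (*fmt != '\0') {
        if (*fmt != '!') { out += *fmt++; continue; }
        ++fmt;                                 // consume '!'
        if (*fmt == '!') { out += '!'; ++fmt; continue; }
        int operand = operands[*fmt++ - '0'];  // which lir->operands[] slot
        char key = *fmt++;                     // format key, e.g. 'd'
        if (key == 'd') out += std::to_string(operand);
        // 'r', 'h', 't', ... elided.
      }
      return out;
    }

    int main() {
      const int ops[4] = {42, 0, 0, 0};
      assert(Expand("addiu !0d", ops) == "addiu 42");
      return 0;
    }
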
-
-// FIXME: need to redo resource maps for MIPS64 - fix this at that time.
-void Mips64Mir2Lir::DumpResourceMask(LIR *mips64_lir, const ResourceMask& mask, const char *prefix) {
- char buf[256];
- buf[0] = 0;
-
- if (mask.Equals(kEncodeAll)) {
- strcpy(buf, "all");
- } else {
- char num[8];
- int i;
-
- for (i = 0; i < kMips64RegEnd; i++) {
- if (mask.HasBit(i)) {
- snprintf(num, arraysize(num), "%d ", i);
- strcat(buf, num);
- }
- }
-
- if (mask.HasBit(ResourceMask::kCCode)) {
- strcat(buf, "cc ");
- }
- if (mask.HasBit(ResourceMask::kFPStatus)) {
- strcat(buf, "fpcc ");
- }
- // Memory bits.
- if (mips64_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
- snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
- DECODE_ALIAS_INFO_REG(mips64_lir->flags.alias_info),
- DECODE_ALIAS_INFO_WIDE(mips64_lir->flags.alias_info) ? "(+1)" : "");
- }
- if (mask.HasBit(ResourceMask::kLiteral)) {
- strcat(buf, "lit ");
- }
-
- if (mask.HasBit(ResourceMask::kHeapRef)) {
- strcat(buf, "heap ");
- }
- if (mask.HasBit(ResourceMask::kMustNotAlias)) {
- strcat(buf, "noalias ");
- }
- }
- if (buf[0]) {
- LOG(INFO) << prefix << ": " << buf;
- }
-}
-
-/*
- * TUNING: is this method a true leaf? We can't just use METHOD_IS_LEAF to
- * decide, as some instructions might call out to C/assembly helper functions.
- * Until that machinery is in place, always spill lr.
- */
-
-void Mips64Mir2Lir::AdjustSpillMask() {
- core_spill_mask_ |= (1 << rs_rRA.GetRegNum());
- num_core_spills_++;
-}
-
-/* Clobber all regs that might be used by an external C call */
-void Mips64Mir2Lir::ClobberCallerSave() {
- Clobber(rs_rZEROd);
- Clobber(rs_rATd);
- Clobber(rs_rV0d);
- Clobber(rs_rV1d);
- Clobber(rs_rA0d);
- Clobber(rs_rA1d);
- Clobber(rs_rA2d);
- Clobber(rs_rA3d);
- Clobber(rs_rA4d);
- Clobber(rs_rA5d);
- Clobber(rs_rA6d);
- Clobber(rs_rA7d);
- Clobber(rs_rT0d);
- Clobber(rs_rT1d);
- Clobber(rs_rT2d);
- Clobber(rs_rT3d);
- Clobber(rs_rT8d);
- Clobber(rs_rT9d);
- Clobber(rs_rK0d);
- Clobber(rs_rK1d);
- Clobber(rs_rGPd);
- Clobber(rs_rFPd);
- Clobber(rs_rRAd);
-
- Clobber(rs_rF0);
- Clobber(rs_rF1);
- Clobber(rs_rF2);
- Clobber(rs_rF3);
- Clobber(rs_rF4);
- Clobber(rs_rF5);
- Clobber(rs_rF6);
- Clobber(rs_rF7);
- Clobber(rs_rF8);
- Clobber(rs_rF9);
- Clobber(rs_rF10);
- Clobber(rs_rF11);
- Clobber(rs_rF12);
- Clobber(rs_rF13);
- Clobber(rs_rF14);
- Clobber(rs_rF15);
- Clobber(rs_rD0);
- Clobber(rs_rD1);
- Clobber(rs_rD2);
- Clobber(rs_rD3);
- Clobber(rs_rD4);
- Clobber(rs_rD5);
- Clobber(rs_rD6);
- Clobber(rs_rD7);
-}
-
-RegLocation Mips64Mir2Lir::GetReturnWideAlt() {
- UNIMPLEMENTED(FATAL) << "No GetReturnWideAlt for MIPS64";
- RegLocation res = LocCReturnWide();
- return res;
-}
-
-RegLocation Mips64Mir2Lir::GetReturnAlt() {
- UNIMPLEMENTED(FATAL) << "No GetReturnAlt for MIPS64";
- RegLocation res = LocCReturn();
- return res;
-}
-
-/* To be used when explicitly managing register use */
-void Mips64Mir2Lir::LockCallTemps() {
- LockTemp(rs_rMIPS64_ARG0);
- LockTemp(rs_rMIPS64_ARG1);
- LockTemp(rs_rMIPS64_ARG2);
- LockTemp(rs_rMIPS64_ARG3);
- LockTemp(rs_rMIPS64_ARG4);
- LockTemp(rs_rMIPS64_ARG5);
- LockTemp(rs_rMIPS64_ARG6);
- LockTemp(rs_rMIPS64_ARG7);
-}
-
-/* To be used when explicitly managing register use */
-void Mips64Mir2Lir::FreeCallTemps() {
- FreeTemp(rs_rMIPS64_ARG0);
- FreeTemp(rs_rMIPS64_ARG1);
- FreeTemp(rs_rMIPS64_ARG2);
- FreeTemp(rs_rMIPS64_ARG3);
- FreeTemp(rs_rMIPS64_ARG4);
- FreeTemp(rs_rMIPS64_ARG5);
- FreeTemp(rs_rMIPS64_ARG6);
- FreeTemp(rs_rMIPS64_ARG7);
- FreeTemp(TargetReg(kHiddenArg));
-}
-
-bool Mips64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind ATTRIBUTE_UNUSED) {
- if (cu_->compiler_driver->GetInstructionSetFeatures()->IsSmp()) {
- NewLIR1(kMips64Sync, 0 /* Only stype currently supported */);
- return true;
- } else {
- return false;
- }
-}
-
-void Mips64Mir2Lir::CompilerInitializeRegAlloc() {
- reg_pool_.reset(new (arena_) RegisterPool(this, arena_, core_regs32, core_regs64, sp_regs,
- dp_regs, reserved_regs32, reserved_regs64,
- core_temps32, core_temps64, sp_temps,
- dp_temps));
-
- // Target-specific adjustments.
-
- // Alias single precision floats to appropriate half of overlapping double.
- for (RegisterInfo* info : reg_pool_->sp_regs_) {
- int sp_reg_num = info->GetReg().GetRegNum();
- int dp_reg_num = sp_reg_num;
- RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | dp_reg_num);
- RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
- // Double precision register's master storage should refer to itself.
- DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
- // Redirect the single-precision register's master storage to the double.
- info->SetMaster(dp_reg_info);
- // Singles should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- }
-
- // Alias 32-bit core registers to the corresponding 64-bit core registers.
- for (RegisterInfo* info : reg_pool_->core_regs_) {
- int d_reg_num = info->GetReg().GetRegNum();
- RegStorage d_reg = RegStorage::Solo64(d_reg_num);
- RegisterInfo* d_reg_info = GetRegInfo(d_reg);
- // 64bit D register's master storage should refer to itself.
- DCHECK_EQ(d_reg_info, d_reg_info->Master());
- // Redirect 32bit master storage to 64bit D.
- info->SetMaster(d_reg_info);
- // 32bit should show a single 32-bit mask bit, at first referring to the low half.
- DCHECK_EQ(info->StorageMask(), 0x1U);
- }
-
- // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
- // TODO: adjust when we roll to hard float calling convention.
- reg_pool_->next_core_reg_ = 2;
- reg_pool_->next_sp_reg_ = 2;
- reg_pool_->next_dp_reg_ = 1;
-}
-
-/*
- * In the ARM code it is typical to use the link register
- * to hold the target address. However, for Mips64 we must
- * ensure that all branch instructions can be restarted if
- * there is a trap in the shadow. Allocate a temp register.
- */
-RegStorage Mips64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS64_SELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_rT9d);
- return rs_rT9d;
-}
-
-LIR* Mips64Mir2Lir::CheckSuspendUsingLoad() {
- RegStorage tmp = AllocTemp();
- // NOTE: native pointer.
- LoadWordDisp(rs_rMIPS64_SELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
- LIR *inst = LoadWordDisp(tmp, 0, tmp);
- FreeTemp(tmp);
- return inst;
-}
-
-LIR* Mips64Mir2Lir::GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest) {
- DCHECK(!r_dest.IsFloat()); // See RegClassForFieldLoadStore().
- ClobberCallerSave();
- LockCallTemps(); // Using fixed registers.
- RegStorage reg_ptr = TargetReg(kArg0);
- OpRegRegImm(kOpAdd, reg_ptr, r_base, displacement);
- RegStorage r_tgt = LoadHelper(kQuickA64Load);
- LIR *ret = OpReg(kOpBlx, r_tgt);
- OpRegCopy(r_dest, TargetReg(kRet0));
- return ret;
-}
-
-LIR* Mips64Mir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) {
- DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore().
- DCHECK(!r_src.IsPair());
- ClobberCallerSave();
- LockCallTemps(); // Using fixed registers.
- RegStorage temp_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, temp_ptr, r_base, displacement);
- RegStorage temp_value = AllocTemp();
- OpRegCopy(temp_value, r_src);
- OpRegCopy(TargetReg(kArg0), temp_ptr);
- OpRegCopy(TargetReg(kArg1), temp_value);
- FreeTemp(temp_ptr);
- FreeTemp(temp_value);
- RegStorage r_tgt = LoadHelper(kQuickA64Store);
- return OpReg(kOpBlx, r_tgt);
-}
-
-void Mips64Mir2Lir::SpillCoreRegs() {
- if (num_core_spills_ == 0) {
- return;
- }
- uint32_t mask = core_spill_mask_;
- // Start saving from offset 0 so that ra ends up on the top of the frame.
- int offset = 0;
- OpRegImm(kOpSub, rs_rSPd, num_core_spills_ * 8);
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- StoreWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
- offset += 8;
- }
- }
-}
-
-void Mips64Mir2Lir::UnSpillCoreRegs() {
- if (num_core_spills_ == 0) {
- return;
- }
- uint32_t mask = core_spill_mask_;
- int offset = frame_size_ - num_core_spills_ * 8;
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- LoadWordDisp(rs_rMIPS64_SP, offset, RegStorage::Solo64(reg));
- offset += 8;
- }
- }
- OpRegImm(kOpAdd, rs_rSPd, frame_size_);
-}
-
-bool Mips64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
- return (lir->opcode == kMips64B);
-}
-
-RegisterClass Mips64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
- if (UNLIKELY(is_volatile)) {
- // On Mips64, atomic 64-bit load/store requires a core register.
- // Smaller aligned load/store is atomic for both core and fp registers.
- if (size == k64 || size == kDouble) {
- return kCoreReg;
- }
- }
- // TODO: Verify that both core and fp registers are suitable for smaller sizes.
- return RegClassBySize(size);
-}
-
-Mips64Mir2Lir::Mips64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
- : Mir2Lir(cu, mir_graph, arena), in_to_reg_storage_mips64_mapper_(this) {
- for (int i = 0; i < kMips64Last; i++) {
- DCHECK_EQ(Mips64Mir2Lir::EncodingMap[i].opcode, i)
- << "Encoding order for " << Mips64Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(Mips64Mir2Lir::EncodingMap[i].opcode);
- }
-}
-
-Mir2Lir* Mips64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena) {
- return new Mips64Mir2Lir(cu, mir_graph, arena);
-}
-
-uint64_t Mips64Mir2Lir::GetTargetInstFlags(int opcode) {
- DCHECK(!IsPseudoLirOp(opcode));
- return Mips64Mir2Lir::EncodingMap[opcode].flags;
-}
-
-const char* Mips64Mir2Lir::GetTargetInstName(int opcode) {
- DCHECK(!IsPseudoLirOp(opcode));
- return Mips64Mir2Lir::EncodingMap[opcode].name;
-}
-
-const char* Mips64Mir2Lir::GetTargetInstFmt(int opcode) {
- DCHECK(!IsPseudoLirOp(opcode));
- return Mips64Mir2Lir::EncodingMap[opcode].fmt;
-}
-
-void Mips64Mir2Lir::GenBreakpoint(int code) {
- NewLIR1(kMips64Break, code);
-}
-
-} // namespace art
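SpillCoreRegs() and UnSpillCoreRegs() above walk core_spill_mask_ one bit at a time and hand each set bit the next 8-byte stack slot, so the highest-numbered spilled register (ra, forced into the mask by AdjustSpillMask()) lands at the top of the frame. A minimal standalone sketch of that mask walk, with printf standing in for the real store and a purely illustrative mask:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical mask: s0 (bit 16), s1 (bit 17) and ra (bit 31) are spilled.
  uint32_t mask = (1u << 16) | (1u << 17) | (1u << 31);
  int offset = 0;  // ra gets the highest offset, i.e. the top of the frame.
  for (int reg = 0; mask != 0; mask >>= 1, reg++) {
    if (mask & 1) {
      std::printf("sd r%d, %d(sp)\n", reg, offset);
      offset += 8;
    }
  }
  return 0;
}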
diff --git a/compiler/dex/quick/mips64/utility_mips64.cc b/compiler/dex/quick/mips64/utility_mips64.cc
deleted file mode 100644
index 38e354cbde..0000000000
--- a/compiler/dex/quick/mips64/utility_mips64.cc
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "codegen_mips64.h"
-
-#include "arch/mips64/instruction_set_features_mips64.h"
-#include "base/logging.h"
-#include "dex/quick/mir_to_lir-inl.h"
-#include "dex/reg_storage_eq.h"
-#include "driver/compiler_driver.h"
-#include "mips64_lir.h"
-
-namespace art {
-
-/* This file contains codegen for the MIPS64 ISA. */
-
-LIR* Mips64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
- int opcode;
- // Must be both DOUBLE or both not DOUBLE.
- DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
- if (r_dest.Is64Bit()) {
- if (r_dest.IsDouble()) {
- if (r_src.IsDouble()) {
- opcode = kMips64Fmovd;
- } else {
- // Note the operands are swapped for the dmtc1 instr.
- RegStorage t_opnd = r_src;
- r_src = r_dest;
- r_dest = t_opnd;
- opcode = kMips64Dmtc1;
- }
- } else {
- DCHECK(r_src.IsDouble());
- opcode = kMips64Dmfc1;
- }
- } else {
- if (r_dest.IsSingle()) {
- if (r_src.IsSingle()) {
- opcode = kMips64Fmovs;
- } else {
- // Note the operands are swapped for the mtc1 instr.
- RegStorage t_opnd = r_src;
- r_src = r_dest;
- r_dest = t_opnd;
- opcode = kMips64Mtc1;
- }
- } else {
- DCHECK(r_src.IsSingle());
- opcode = kMips64Mfc1;
- }
- }
- LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
- if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
- res->flags.is_nop = true;
- }
- return res;
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantInt(int32_t value) {
- // For encodings, see LoadConstantNoClobber below.
- return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
- UNUSED(value);
- return false; // TUNING
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
- return false; // TUNING
-}
-
-bool Mips64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
- UNUSED(value);
- return false; // TUNING
-}
-
-/*
- * Load an immediate using a shortcut if possible; otherwise
- * grab it from the per-translation literal pool. If the target is
- * a high register, build the constant into a low register and copy.
- *
- * No additional register clobbering operation is performed. Use this version when
- * 1) r_dest is freshly returned from AllocTemp, or
- * 2) the codegen is under fixed register usage.
- */
-LIR* Mips64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
- LIR *res;
-
- RegStorage r_dest_save = r_dest;
- int is_fp_reg = r_dest.IsFloat();
- if (is_fp_reg) {
- DCHECK(r_dest.IsSingle());
- r_dest = AllocTemp();
- }
-
- // See if the value can be constructed cheaply.
- if (value == 0) {
- res = NewLIR2(kMips64Move, r_dest.GetReg(), rZERO);
- } else if (IsUint<16>(value)) {
- // Use OR with (unsigned) immediate to encode 16b unsigned int.
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZERO, value);
- } else if (IsInt<16>(value)) {
- // Use ADD with (signed) immediate to encode 16b signed int.
- res = NewLIR3(kMips64Addiu, r_dest.GetReg(), rZERO, value);
- } else {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- if (value & 0xffff) {
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- }
- }
-
- if (is_fp_reg) {
- NewLIR2(kMips64Mtc1, r_dest.GetReg(), r_dest_save.GetReg());
- FreeTemp(r_dest);
- }
-
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpUnconditionalBranch(LIR* target) {
- LIR* res = NewLIR1(kMips64B, 0 /* offset to be patched during assembly */);
- res->target = target;
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
- Mips64OpCode opcode = kMips64Nop;
- switch (op) {
- case kOpBlx:
- opcode = kMips64Jalr;
- break;
- case kOpBx:
- return NewLIR2(kMips64Jalr, rZERO, r_dest_src.GetReg());
- default:
- LOG(FATAL) << "Bad case in OpReg";
- }
- return NewLIR2(opcode, rRAd, r_dest_src.GetReg());
-}
-
-LIR* Mips64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
- LIR *res;
- bool neg = (value < 0);
- int abs_value = (neg) ? -value : value;
- bool short_form = (abs_value & 0xff) == abs_value;
- bool is64bit = r_dest_src1.Is64Bit();
- RegStorage r_scratch;
- Mips64OpCode opcode = kMips64Nop;
- switch (op) {
- case kOpAdd:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- case kOpSub:
- return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
- default:
- LOG(FATAL) << "Bad case in OpRegImm";
- }
- if (short_form) {
- res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
- } else {
- if (is64bit) {
- r_scratch = AllocTempWide();
- res = LoadConstantWide(r_scratch, value);
- } else {
- r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
- }
- if (op == kOpCmp) {
- NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
- } else {
- NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
- }
- }
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest,
- RegStorage r_src1, RegStorage r_src2) {
- Mips64OpCode opcode = kMips64Nop;
- bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit();
-
- switch (op) {
- case kOpAdd:
- if (is64bit) {
- opcode = kMips64Daddu;
- } else {
- opcode = kMips64Addu;
- }
- break;
- case kOpSub:
- if (is64bit) {
- opcode = kMips64Dsubu;
- } else {
- opcode = kMips64Subu;
- }
- break;
- case kOpAnd:
- opcode = kMips64And;
- break;
- case kOpMul:
- opcode = kMips64Mul;
- break;
- case kOpOr:
- opcode = kMips64Or;
- break;
- case kOpXor:
- opcode = kMips64Xor;
- break;
- case kOpLsl:
- if (is64bit) {
- opcode = kMips64Dsllv;
- } else {
- opcode = kMips64Sllv;
- }
- break;
- case kOpLsr:
- if (is64bit) {
- opcode = kMips64Dsrlv;
- } else {
- opcode = kMips64Srlv;
- }
- break;
- case kOpAsr:
- if (is64bit) {
- opcode = kMips64Dsrav;
- } else {
- opcode = kMips64Srav;
- }
- break;
- case kOpAdc:
- case kOpSbc:
- LOG(FATAL) << "No carry bit on MIPS64";
- break;
- default:
- LOG(FATAL) << "Bad case in OpRegRegReg";
- break;
- }
- return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
-}
-
-LIR* Mips64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
- LIR *res;
- Mips64OpCode opcode = kMips64Nop;
- bool short_form = true;
- bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit();
-
- switch (op) {
- case kOpAdd:
- if (is64bit) {
- if (IS_SIMM16(value)) {
- opcode = kMips64Daddiu;
- } else {
- short_form = false;
- opcode = kMips64Daddu;
- }
- } else {
- if (IS_SIMM16(value)) {
- opcode = kMips64Addiu;
- } else {
- short_form = false;
- opcode = kMips64Addu;
- }
- }
- break;
- case kOpSub:
- if (is64bit) {
- if (IS_SIMM16((-value))) {
- value = -value;
- opcode = kMips64Daddiu;
- } else {
- short_form = false;
- opcode = kMips64Dsubu;
- }
- } else {
- if (IS_SIMM16((-value))) {
- value = -value;
- opcode = kMips64Addiu;
- } else {
- short_form = false;
- opcode = kMips64Subu;
- }
- }
- break;
- case kOpLsl:
- if (is64bit) {
- DCHECK(value >= 0 && value <= 63);
- if (value >= 0 && value <= 31) {
- opcode = kMips64Dsll;
- } else {
- opcode = kMips64Dsll32;
- value = value - 32;
- }
- } else {
- DCHECK(value >= 0 && value <= 31);
- opcode = kMips64Sll;
- }
- break;
- case kOpLsr:
- if (is64bit) {
- DCHECK(value >= 0 && value <= 63);
- if (value >= 0 && value <= 31) {
- opcode = kMips64Dsrl;
- } else {
- opcode = kMips64Dsrl32;
- value = value - 32;
- }
- } else {
- DCHECK(value >= 0 && value <= 31);
- opcode = kMips64Srl;
- }
- break;
- case kOpAsr:
- if (is64bit) {
- DCHECK(value >= 0 && value <= 63);
- if (value >= 0 && value <= 31) {
- opcode = kMips64Dsra;
- } else {
- opcode = kMips64Dsra32;
- value = value - 32;
- }
- } else {
- DCHECK(value >= 0 && value <= 31);
- opcode = kMips64Sra;
- }
- break;
- case kOpAnd:
- if (IS_UIMM16((value))) {
- opcode = kMips64Andi;
- } else {
- short_form = false;
- opcode = kMips64And;
- }
- break;
- case kOpOr:
- if (IS_UIMM16((value))) {
- opcode = kMips64Ori;
- } else {
- short_form = false;
- opcode = kMips64Or;
- }
- break;
- case kOpXor:
- if (IS_UIMM16((value))) {
- opcode = kMips64Xori;
- } else {
- short_form = false;
- opcode = kMips64Xor;
- }
- break;
- case kOpMul:
- short_form = false;
- opcode = kMips64Mul;
- break;
- default:
- LOG(FATAL) << "Bad case in OpRegRegImm";
- break;
- }
-
- if (short_form) {
- res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
- } else {
- if (r_dest != r_src1) {
- res = LoadConstant(r_dest, value);
- NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
- } else {
- if (is64bit) {
- RegStorage r_scratch = AllocTempWide();
- res = LoadConstantWide(r_scratch, value);
- NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
- } else {
- RegStorage r_scratch = AllocTemp();
- res = LoadConstant(r_scratch, value);
- NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
- }
- }
- }
- return res;
-}
-
-LIR* Mips64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
- Mips64OpCode opcode = kMips64Nop;
- LIR *res;
- switch (op) {
- case kOpMov:
- opcode = kMips64Move;
- break;
- case kOpMvn:
- return NewLIR3(kMips64Nor, r_dest_src1.GetReg(), r_src2.GetReg(), rZEROd);
- case kOpNeg:
- if (r_dest_src1.Is64Bit()) {
- return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
- } else {
- return NewLIR3(kMips64Subu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
- }
- case kOpAdd:
- case kOpAnd:
- case kOpMul:
- case kOpOr:
- case kOpSub:
- case kOpXor:
- return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
- case kOp2Byte:
- res = NewLIR2(kMips64Seb, r_dest_src1.GetReg(), r_src2.GetReg());
- return res;
- case kOp2Short:
- res = NewLIR2(kMips64Seh, r_dest_src1.GetReg(), r_src2.GetReg());
- return res;
- case kOp2Char:
- return NewLIR3(kMips64Andi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
- default:
- LOG(FATAL) << "Bad case in OpRegReg";
- UNREACHABLE();
- }
- return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
-}
-
-LIR* Mips64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
- UNIMPLEMENTED(FATAL);
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset,
- RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
- UNIMPLEMENTED(FATAL);
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc,
- RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
- LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
- LIR *res = nullptr;
- DCHECK(r_dest.Is64Bit());
- RegStorage r_dest_save = r_dest;
- int is_fp_reg = r_dest.IsFloat();
- if (is_fp_reg) {
- DCHECK(r_dest.IsDouble());
- r_dest = AllocTemp();
- }
-
- int bit31 = (value & UINT64_C(0x80000000)) != 0;
-
- // Loads with 1 instruction.
- if (IsUint<16>(value)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
- } else if (IsInt<16>(value)) {
- res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
- } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- } else if (IsInt<32>(value)) {
- // Loads with 2 instructions.
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
- NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
- } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
- NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
- } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
- (value >> 32) <= (32767 - bit31)) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
- } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
- NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
- } else {
- int64_t tmp = value;
- int shift_cnt = 0;
- while ((tmp & 1) == 0) {
- tmp >>= 1;
- shift_cnt++;
- }
-
- if (IsUint<16>(tmp)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- } else if (IsInt<16>(tmp)) {
- res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- } else if (IsInt<32>(tmp)) {
- // Loads with 3 instructions.
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp >> 16);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- } else {
- tmp = value >> 16;
- shift_cnt = 16;
- while ((tmp & 1) == 0) {
- tmp >>= 1;
- shift_cnt++;
- }
-
- if (IsUint<16>(tmp)) {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- } else if (IsInt<16>(tmp)) {
- res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
- NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
- shift_cnt & 0x1F);
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
- } else {
- // Loads with 3-4 instructions.
- uint64_t tmp2 = value;
- if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
- res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp2 >> 16);
- }
- if ((tmp2 & 0xFFFF) != 0) {
- if (res) {
- NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp2);
- } else {
- res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp2);
- }
- }
- if (bit31) {
- tmp2 += UINT64_C(0x100000000);
- }
- if (((tmp2 >> 32) & 0xFFFF) != 0) {
- NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
- }
- if (tmp2 & UINT64_C(0x800000000000)) {
- tmp2 += UINT64_C(0x1000000000000);
- }
- if ((tmp2 >> 48) != 0) {
- NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
- }
- }
- }
- }
-
- if (is_fp_reg) {
- NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
- FreeTemp(r_dest);
- }
-
- return res;
-}
-
-/* Load value from base + scaled index. */
-LIR* Mips64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
- int scale, OpSize size) {
- LIR *first = nullptr;
- LIR *res;
- RegStorage t_reg;
- Mips64OpCode opcode = kMips64Nop;
- bool is64bit = r_dest.Is64Bit();
- if (is64bit) {
- t_reg = AllocTempWide();
- } else {
- t_reg = AllocTemp();
- }
-
- if (r_dest.IsFloat()) {
- DCHECK(r_dest.IsSingle());
- DCHECK((size == k32) || (size == kSingle) || (size == kReference));
- size = kSingle;
- } else if (is64bit) {
- size = k64;
- } else {
- if (size == kSingle) {
- size = k32;
- }
- }
-
- if (!scale) {
- if (is64bit) {
- first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
- } else {
- first = NewLIR3(kMips64Addu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
- }
- } else {
- first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
- }
-
- switch (size) {
- case k64:
- opcode = kMips64Ld;
- break;
- case kSingle:
- opcode = kMips64Flwc1;
- break;
- case k32:
- case kReference:
- opcode = kMips64Lw;
- break;
- case kUnsignedHalf:
- opcode = kMips64Lhu;
- break;
- case kSignedHalf:
- opcode = kMips64Lh;
- break;
- case kUnsignedByte:
- opcode = kMips64Lbu;
- break;
- case kSignedByte:
- opcode = kMips64Lb;
- break;
- default:
- LOG(FATAL) << "Bad case in LoadBaseIndexed";
- }
-
- res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
- FreeTemp(t_reg);
- return (first) ? first : res;
-}
-
-/* Store value at base + scaled index. */
-LIR* Mips64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
- int scale, OpSize size) {
- LIR *first = nullptr;
- Mips64OpCode opcode = kMips64Nop;
- RegStorage t_reg = AllocTemp();
-
- if (r_src.IsFloat()) {
- DCHECK(r_src.IsSingle());
- DCHECK((size == k32) || (size == kSingle) || (size == kReference));
- size = kSingle;
- } else {
- if (size == kSingle) {
- size = k32;
- }
- }
-
- if (!scale) {
- first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
- } else {
- first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
- NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
- }
-
- switch (size) {
- case kSingle:
- opcode = kMips64Fswc1;
- break;
- case k32:
- case kReference:
- opcode = kMips64Sw;
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- opcode = kMips64Sh;
- break;
- case kUnsignedByte:
- case kSignedByte:
- opcode = kMips64Sb;
- break;
- default:
- LOG(FATAL) << "Bad case in StoreBaseIndexed";
- }
- NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
- return first;
-}
-
-// FIXME: don't split r_dest into 2 containers.
-LIR* Mips64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size) {
-/*
- * Load value from base + displacement. Optionally perform null check
- * on base (which must have an associated s_reg and MIR). If not
- * performing null check, incoming MIR can be null. IMPORTANT: this
- * code must not allocate any new temps. If a new register is needed
- * and base and dest are the same, spill some other register to
- * rlp and then restore.
- */
- LIR *res;
- LIR *load = nullptr;
- Mips64OpCode opcode = kMips64Nop;
- bool short_form = IS_SIMM16(displacement);
-
- switch (size) {
- case k64:
- case kDouble:
- r_dest = Check64BitReg(r_dest);
- if (!r_dest.IsFloat()) {
- opcode = kMips64Ld;
- } else {
- opcode = kMips64Fldc1;
- }
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case k32:
- case kSingle:
- case kReference:
- opcode = kMips64Lw;
- if (r_dest.IsFloat()) {
- opcode = kMips64Flwc1;
- DCHECK(r_dest.IsSingle());
- }
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kUnsignedHalf:
- opcode = kMips64Lhu;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kSignedHalf:
- opcode = kMips64Lh;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kUnsignedByte:
- opcode = kMips64Lbu;
- break;
- case kSignedByte:
- opcode = kMips64Lb;
- break;
- default:
- LOG(FATAL) << "Bad case in LoadBaseIndexedBody";
- }
-
- if (short_form) {
- load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
- } else {
- RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
- res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
- load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
- if (r_tmp != r_dest) {
- FreeTemp(r_tmp);
- }
- }
-
- if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS64_SP);
- AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
- }
- return res;
-}
-
-LIR* Mips64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- OpSize size, VolatileKind is_volatile) {
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
- displacement & 0x7)) {
- // TODO: use lld/scd instructions for Mips64.
- // Do atomic 64-bit load.
- return GenAtomic64Load(r_base, displacement, r_dest);
- }
-
- // TODO: base this on target.
- if (size == kWord) {
- size = k64;
- }
- LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- GenMemBarrier(kLoadAny);
- }
-
- return load;
-}
-
-// FIXME: don't split r_dest into 2 containers.
-LIR* Mips64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size) {
- LIR *res;
- LIR *store = nullptr;
- Mips64OpCode opcode = kMips64Nop;
- bool short_form = IS_SIMM16(displacement);
-
- switch (size) {
- case k64:
- case kDouble:
- r_src = Check64BitReg(r_src);
- if (!r_src.IsFloat()) {
- opcode = kMips64Sd;
- } else {
- opcode = kMips64Fsdc1;
- }
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case k32:
- case kSingle:
- case kReference:
- opcode = kMips64Sw;
- if (r_src.IsFloat()) {
- opcode = kMips64Fswc1;
- DCHECK(r_src.IsSingle());
- }
- DCHECK_EQ((displacement & 0x3), 0);
- break;
- case kUnsignedHalf:
- case kSignedHalf:
- opcode = kMips64Sh;
- DCHECK_EQ((displacement & 0x1), 0);
- break;
- case kUnsignedByte:
- case kSignedByte:
- opcode = kMips64Sb;
- break;
- default:
- LOG(FATAL) << "Bad case in StoreBaseDispBody";
- }
-
- if (short_form) {
- store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
- } else {
- RegStorage r_scratch = AllocTemp();
- res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
- store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
- FreeTemp(r_scratch);
- }
-
- if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK_EQ(r_base, rs_rMIPS64_SP);
- AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
- }
-
- return res;
-}
-
-LIR* Mips64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
- OpSize size, VolatileKind is_volatile) {
- if (is_volatile == kVolatile) {
- // Ensure that prior accesses become visible to other threads first.
- GenMemBarrier(kAnyStore);
- }
-
- LIR* store;
- if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
- displacement & 0x7)) {
- // TODO: use lld/scd instructions for Mips64.
- // Do atomic 64-bit store.
- store = GenAtomic64Store(r_base, displacement, r_src);
- } else {
- // TODO: base this on target.
- if (size == kWord) {
- size = k64;
- }
- store = StoreBaseDispBody(r_base, displacement, r_src, size);
- }
-
- if (UNLIKELY(is_volatile == kVolatile)) {
- // Preserve order with respect to any subsequent volatile loads.
- // We need StoreLoad, but that generally requires the most expensive barrier.
- GenMemBarrier(kAnyAny);
- }
-
- return store;
-}
-
-LIR* Mips64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
- LOG(FATAL) << "Unexpected use of OpMem for MIPS64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
- UNUSED(cc, target);
- LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS64";
- UNREACHABLE();
-}
-
-LIR* Mips64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
- return OpReg(op, r_tgt);
-}
-
-} // namespace art
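The final else branch of LoadConstantWide() above assembles an arbitrary 64-bit constant out of lui/ori plus dahi/dati. Since dahi and dati add a sign-extended 16-bit immediate at bit 32 and bit 48 respectively, a set bit 31 (or bit 47) of the running value sign-extends into the upper chunks, so the next immediate must be bumped by one to compensate. A standalone sketch of just that decomposition, using an arbitrary example constant:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t value = UINT64_C(0x123456789abcdef0);
  uint32_t lui = (value >> 16) & 0xFFFF;    // Bits 16..31 (lui sign-extends).
  uint32_t ori = value & 0xFFFF;            // Bits 0..15.
  uint64_t tmp = value;
  if (tmp & UINT64_C(0x80000000)) {         // lui/ori sign-extend into bits 32+,
    tmp += UINT64_C(0x100000000);           // so dahi must add one extra.
  }
  uint32_t dahi = (tmp >> 32) & 0xFFFF;     // Bits 32..47.
  if (tmp & UINT64_C(0x800000000000)) {     // dahi sign-extends into bits 48+,
    tmp += UINT64_C(0x1000000000000);       // so dati must add one extra.
  }
  uint32_t dati = (tmp >> 48) & 0xFFFF;     // Bits 48..63.
  std::printf("lui 0x%x; ori 0x%x; dahi 0x%x; dati 0x%x\n", lui, ori, dahi, dati);
  return 0;
}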
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index d4ad0c2daa..8baafc7fd2 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -45,7 +45,6 @@
#include "dex/quick/arm/backend_arm.h"
#include "dex/quick/arm64/backend_arm64.h"
#include "dex/quick/mips/backend_mips.h"
-#include "dex/quick/mips64/backend_mips64.h"
#include "dex/quick/x86/backend_x86.h"
namespace art {
@@ -814,10 +813,9 @@ Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_
mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
case kMips:
- mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
+ // Fall-through.
case kMips64:
- mir_to_lir = Mips64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
case kX86:
// Fall-through.
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index cdf71b642e..d692d26229 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -27,8 +27,9 @@ class DexFileToMethodInlinerMap;
class QuickCompilerCallbacks FINAL : public CompilerCallbacks {
public:
QuickCompilerCallbacks(VerificationResults* verification_results,
- DexFileToMethodInlinerMap* method_inliner_map)
- : verification_results_(verification_results),
+ DexFileToMethodInlinerMap* method_inliner_map,
+ CompilerCallbacks::CallbackMode mode)
+ : CompilerCallbacks(mode), verification_results_(verification_results),
method_inliner_map_(method_inliner_map) {
CHECK(verification_results != nullptr);
CHECK(method_inliner_map != nullptr);
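The constructor change above threads the callback mode through from the caller. A hedged sketch of the new construction site; the enumerator name kCompileApp and its scoped-enum spelling are assumptions about CompilerCallbacks, and the two pointers are whatever the caller already owns:

// Sketch only: kCompileApp is assumed to be a CompilerCallbacks::CallbackMode
// enumerator; verification_results and method_inliner_map are assumed to be
// valid, caller-owned pointers.
QuickCompilerCallbacks callbacks(verification_results,
                                 method_inliner_map,
                                 CompilerCallbacks::CallbackMode::kCompileApp);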
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index a02e25edde..5ebc029fcf 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -41,7 +41,7 @@ class CompilerDriverTest : public CommonCompilerTest {
TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
TimingLogger::ScopedTiming t(__FUNCTION__, &timings);
compiler_driver_->CompileAll(class_loader,
- Runtime::Current()->GetCompileTimeClassPath(class_loader),
+ GetDexFiles(class_loader),
&timings);
t.NewTiming("MakeAllExecutable");
MakeAllExecutable(class_loader);
@@ -66,8 +66,7 @@ class CompilerDriverTest : public CommonCompilerTest {
}
void MakeAllExecutable(jobject class_loader) {
- const std::vector<const DexFile*>& class_path
- = Runtime::Current()->GetCompileTimeClassPath(class_loader);
+ const std::vector<const DexFile*> class_path = GetDexFiles(class_loader);
for (size_t i = 0; i != class_path.size(); ++i) {
const DexFile* dex_file = class_path[i];
CHECK(dex_file != NULL);
diff --git a/compiler/dwarf/debug_frame_opcode_writer.h b/compiler/dwarf/debug_frame_opcode_writer.h
new file mode 100644
index 0000000000..cc4ef8fde1
--- /dev/null
+++ b/compiler/dwarf/debug_frame_opcode_writer.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
+#define ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
+
+#include "dwarf.h"
+#include "register.h"
+#include "writer.h"
+
+namespace art {
+namespace dwarf {
+
+// Writer for .debug_frame opcodes (DWARF-3).
+// See the DWARF specification for the precise meaning of the opcodes.
+// The writer is very lightweight; however, it will do the following for you:
+// * Choose the most compact encoding of a given opcode.
+// * Keep track of current state and convert absolute values to deltas.
+// * Divide by header-defined factors as appropriate.
+template<typename Allocator = std::allocator<uint8_t>>
+class DebugFrameOpCodeWriter : private Writer<Allocator> {
+ public:
+ // To save space, DWARF divides most offsets by header-defined factors.
+ // They are used in integer divisions, so we make them constants.
+ // We usually subtract from the stack base pointer, so making the factor
+ // negative makes the encoded values positive and thus easier to encode.
+ static constexpr int kDataAlignmentFactor = -4;
+ static constexpr int kCodeAlignmentFactor = 1;
+
+ // Explicitly advance the program counter to the given location.
+ void AdvancePC(int absolute_pc) {
+ DCHECK_GE(absolute_pc, current_pc_);
+ int delta = FactorCodeOffset(absolute_pc - current_pc_);
+ if (delta != 0) {
+ if (delta <= 0x3F) {
+ this->PushUint8(DW_CFA_advance_loc | delta);
+ } else if (delta <= UINT8_MAX) {
+ this->PushUint8(DW_CFA_advance_loc1);
+ this->PushUint8(delta);
+ } else if (delta <= UINT16_MAX) {
+ this->PushUint8(DW_CFA_advance_loc2);
+ this->PushUint16(delta);
+ } else {
+ this->PushUint8(DW_CFA_advance_loc4);
+ this->PushUint32(delta);
+ }
+ }
+ current_pc_ = absolute_pc;
+ }
+
+ // Override this method to automatically advance the PC before each opcode.
+ virtual void ImplicitlyAdvancePC() { }
+
+ // Common alias in assemblers - spill relative to current stack pointer.
+ void RelOffset(Reg reg, int offset) {
+ Offset(reg, offset - current_cfa_offset_);
+ }
+
+ // Common alias in assemblers - increase stack frame size.
+ void AdjustCFAOffset(int delta) {
+ DefCFAOffset(current_cfa_offset_ + delta);
+ }
+
+ // Custom alias - spill many registers based on bitmask.
+ void RelOffsetForMany(Reg reg_base, int offset, uint32_t reg_mask,
+ int reg_size) {
+ DCHECK(reg_size == 4 || reg_size == 8);
+ for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) {
+ if ((reg_mask & 1) != 0u) {
+ RelOffset(Reg(reg_base.num() + i), offset);
+ offset += reg_size;
+ }
+ }
+ }
+
+ // Custom alias - unspill many registers based on bitmask.
+ void RestoreMany(Reg reg_base, uint32_t reg_mask) {
+ for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) {
+ if ((reg_mask & 1) != 0u) {
+ Restore(Reg(reg_base.num() + i));
+ }
+ }
+ }
+
+ void Nop() {
+ this->PushUint8(DW_CFA_nop);
+ }
+
+ void Offset(Reg reg, int offset) {
+ ImplicitlyAdvancePC();
+ int factored_offset = FactorDataOffset(offset); // May change sign.
+ if (factored_offset >= 0) {
+ if (0 <= reg.num() && reg.num() <= 0x3F) {
+ this->PushUint8(DW_CFA_offset | reg.num());
+ this->PushUleb128(factored_offset);
+ } else {
+ this->PushUint8(DW_CFA_offset_extended);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(factored_offset);
+ }
+ } else {
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_offset_extended_sf);
+ this->PushUleb128(reg.num());
+ this->PushSleb128(factored_offset);
+ }
+ }
+
+ void Restore(Reg reg) {
+ ImplicitlyAdvancePC();
+ if (0 <= reg.num() && reg.num() <= 0x3F) {
+ this->PushUint8(DW_CFA_restore | reg.num());
+ } else {
+ this->PushUint8(DW_CFA_restore_extended);
+ this->PushUleb128(reg.num());
+ }
+ }
+
+ void Undefined(Reg reg) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_undefined);
+ this->PushUleb128(reg.num());
+ }
+
+ void SameValue(Reg reg) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_same_value);
+ this->PushUleb128(reg.num());
+ }
+
+ // The previous value of "reg" is stored in register "new_reg".
+ void Register(Reg reg, Reg new_reg) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_register);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(new_reg.num());
+ }
+
+ void RememberState() {
+ // Note that we do not need to advance the PC.
+ this->PushUint8(DW_CFA_remember_state);
+ }
+
+ void RestoreState() {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_restore_state);
+ }
+
+ void DefCFA(Reg reg, int offset) {
+ ImplicitlyAdvancePC();
+ if (offset >= 0) {
+ this->PushUint8(DW_CFA_def_cfa);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(offset); // Non-factored.
+ } else {
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_def_cfa_sf);
+ this->PushUleb128(reg.num());
+ this->PushSleb128(FactorDataOffset(offset));
+ }
+ current_cfa_offset_ = offset;
+ }
+
+ void DefCFARegister(Reg reg) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_def_cfa_register);
+ this->PushUleb128(reg.num());
+ }
+
+ void DefCFAOffset(int offset) {
+ if (current_cfa_offset_ != offset) {
+ ImplicitlyAdvancePC();
+ if (offset >= 0) {
+ this->PushUint8(DW_CFA_def_cfa_offset);
+ this->PushUleb128(offset); // Non-factored.
+ } else {
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_def_cfa_offset_sf);
+ this->PushSleb128(FactorDataOffset(offset));
+ }
+ current_cfa_offset_ = offset;
+ }
+ }
+
+ void ValOffset(Reg reg, int offset) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ int factored_offset = FactorDataOffset(offset); // May change sign.
+ if (factored_offset >= 0) {
+ this->PushUint8(DW_CFA_val_offset);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(factored_offset);
+ } else {
+ this->PushUint8(DW_CFA_val_offset_sf);
+ this->PushUleb128(reg.num());
+ this->PushSleb128(factored_offset);
+ }
+ }
+
+ void DefCFAExpression(void* expr, int expr_size) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_def_cfa_expression);
+ this->PushUleb128(expr_size);
+ this->PushData(expr, expr_size);
+ }
+
+ void Expression(Reg reg, void* expr, int expr_size) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_expression);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(expr_size);
+ this->PushData(expr, expr_size);
+ }
+
+ void ValExpression(Reg reg, void* expr, int expr_size) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_val_expression);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(expr_size);
+ this->PushData(expr, expr_size);
+ }
+
+ int GetCurrentCFAOffset() const {
+ return current_cfa_offset_;
+ }
+
+ void SetCurrentCFAOffset(int offset) {
+ current_cfa_offset_ = offset;
+ }
+
+ using Writer<Allocator>::data;
+
+ DebugFrameOpCodeWriter(const Allocator& alloc = Allocator())
+ : Writer<Allocator>(&opcodes_),
+ opcodes_(alloc),
+ current_cfa_offset_(0),
+ current_pc_(0),
+ uses_dwarf3_features_(false) {
+ }
+
+ virtual ~DebugFrameOpCodeWriter() { }
+
+ protected:
+ int FactorDataOffset(int offset) const {
+ DCHECK_EQ(offset % kDataAlignmentFactor, 0);
+ return offset / kDataAlignmentFactor;
+ }
+
+ int FactorCodeOffset(int offset) const {
+ DCHECK_EQ(offset % kCodeAlignmentFactor, 0);
+ return offset / kCodeAlignmentFactor;
+ }
+
+ std::vector<uint8_t, Allocator> opcodes_;
+ int current_cfa_offset_;
+ int current_pc_;
+ bool uses_dwarf3_features_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DebugFrameOpCodeWriter);
+};
+
+} // namespace dwarf
+} // namespace art
+
+#endif // ART_COMPILER_DWARF_DEBUG_FRAME_OPCODE_WRITER_H_
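For context, a hedged sketch of how the opcode writer is meant to be driven when describing a method prologue. The DWARF register numbers are illustrative rather than a real ABI mapping, and Reg is assumed to be constructible from an int as in register.h:

#include "dwarf/debug_frame_opcode_writer.h"

// Sketch: describe "grow frame by 16, store two callee-saves" in CFI.
void DescribePrologue(art::dwarf::DebugFrameOpCodeWriter<>* cfi) {
  cfi->AdvancePC(4);                        // Past the stack adjustment.
  cfi->AdjustCFAOffset(16);                 // Frame grew by 16 bytes.
  cfi->RelOffset(art::dwarf::Reg(4), 0);    // First callee-save at sp+0.
  cfi->RelOffset(art::dwarf::Reg(5), 8);    // Second callee-save at sp+8.
}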
diff --git a/compiler/dwarf/debug_frame_writer.h b/compiler/dwarf/debug_frame_writer.h
new file mode 100644
index 0000000000..6de45f5526
--- /dev/null
+++ b/compiler/dwarf/debug_frame_writer.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DWARF_DEBUG_FRAME_WRITER_H_
+#define ART_COMPILER_DWARF_DEBUG_FRAME_WRITER_H_
+
+#include "debug_frame_opcode_writer.h"
+#include "dwarf.h"
+#include "writer.h"
+
+namespace art {
+namespace dwarf {
+
+// Writer for the .eh_frame section (which extends the .debug_frame specification).
+template<typename Allocator = std::allocator<uint8_t>>
+class DebugFrameWriter FINAL : private Writer<Allocator> {
+ public:
+ void WriteCIE(Reg return_address_register,
+ const uint8_t* initial_opcodes,
+ int initial_opcodes_size) {
+ DCHECK(cie_header_start_ == ~0u);
+ cie_header_start_ = this->data()->size();
+ this->PushUint32(0); // Length placeholder.
+ this->PushUint32(0); // CIE id.
+ this->PushUint8(1); // Version.
+ this->PushString("zR");
+ this->PushUleb128(DebugFrameOpCodeWriter<Allocator>::kCodeAlignmentFactor);
+ this->PushSleb128(DebugFrameOpCodeWriter<Allocator>::kDataAlignmentFactor);
+ this->PushUleb128(return_address_register.num()); // ubyte in DWARF2.
+ this->PushUleb128(1); // z: Augmentation data size.
+ if (use_64bit_address_) {
+ this->PushUint8(0x04); // R: ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata8).
+ } else {
+ this->PushUint8(0x03); // R: ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
+ }
+ this->PushData(initial_opcodes, initial_opcodes_size);
+ this->Pad(use_64bit_address_ ? 8 : 4);
+ this->UpdateUint32(cie_header_start_, this->data()->size() - cie_header_start_ - 4);
+ }
+
+ void WriteCIE(Reg return_address_register,
+ const DebugFrameOpCodeWriter<Allocator>& opcodes) {
+ WriteCIE(return_address_register, opcodes.data()->data(), opcodes.data()->size());
+ }
+
+ void WriteFDE(uint64_t initial_address,
+ uint64_t address_range,
+ const uint8_t* unwind_opcodes,
+ int unwind_opcodes_size) {
+ DCHECK(cie_header_start_ != ~0u);
+ size_t fde_header_start = this->data()->size();
+ this->PushUint32(0); // Length placeholder.
+ this->PushUint32(this->data()->size() - cie_header_start_); // 'CIE_pointer'
+ if (use_64bit_address_) {
+ this->PushUint64(initial_address);
+ this->PushUint64(address_range);
+ } else {
+ this->PushUint32(initial_address);
+ this->PushUint32(address_range);
+ }
+ this->PushUleb128(0); // Augmentation data size.
+ this->PushData(unwind_opcodes, unwind_opcodes_size);
+ this->Pad(use_64bit_address_ ? 8 : 4);
+ this->UpdateUint32(fde_header_start, this->data()->size() - fde_header_start - 4);
+ }
+
+ DebugFrameWriter(std::vector<uint8_t, Allocator>* buffer, bool use_64bit_address)
+ : Writer<Allocator>(buffer),
+ use_64bit_address_(use_64bit_address),
+ cie_header_start_(~0u) {
+ }
+
+ private:
+ bool use_64bit_address_;
+ size_t cie_header_start_;
+
+ DISALLOW_COPY_AND_ASSIGN(DebugFrameWriter);
+};
+
+} // namespace dwarf
+} // namespace art
+
+#endif // ART_COMPILER_DWARF_DEBUG_FRAME_WRITER_H_
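A hedged end-to-end sketch of the intended call order: exactly one CIE first, then one FDE per method. The addresses, sizes, and register numbers below are illustrative only:

#include <vector>
#include "dwarf/debug_frame_opcode_writer.h"
#include "dwarf/debug_frame_writer.h"

void WriteEhFrame(std::vector<uint8_t>* buffer) {
  art::dwarf::DebugFrameOpCodeWriter<> init;     // CIE initial CFA state.
  init.DefCFA(art::dwarf::Reg(4), 0);            // Reg 4 as CFA base: an assumption.
  art::dwarf::DebugFrameWriter<> eh_frame(buffer, /* use_64bit_address */ false);
  eh_frame.WriteCIE(art::dwarf::Reg(8), init);   // Reg 8 as return address: an assumption.
  art::dwarf::DebugFrameOpCodeWriter<> method_cfi;
  method_cfi.AdvancePC(4);
  method_cfi.AdjustCFAOffset(16);
  eh_frame.WriteFDE(0x1000, 0x100, method_cfi.data()->data(),
                    static_cast<int>(method_cfi.data()->size()));
}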
diff --git a/compiler/dwarf/debug_line_opcode_writer.h b/compiler/dwarf/debug_line_opcode_writer.h
new file mode 100644
index 0000000000..f34acee647
--- /dev/null
+++ b/compiler/dwarf/debug_line_opcode_writer.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
+#define ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
+
+#include "dwarf.h"
+#include "writer.h"
+
+namespace art {
+namespace dwarf {
+
+// Writer for the .debug_line opcodes (DWARF-3).
+// The writer is very lightweight; however, it will do the following for you:
+// * Choose the most compact encoding of a given opcode.
+// * Keep track of current state and convert absolute values to deltas.
+// * Divide by header-defined factors as appropriate.
+template<typename Allocator = std::allocator<uint8_t>>
+class DebugLineOpCodeWriter FINAL : private Writer<Allocator> {
+ public:
+ static constexpr int kOpcodeBase = 13;
+ static constexpr bool kDefaultIsStmt = true;
+ static constexpr int kLineBase = -5;
+ static constexpr int kLineRange = 14;
+
+ void AddRow() {
+ this->PushUint8(DW_LNS_copy);
+ }
+
+ void AdvancePC(uint64_t absolute_address) {
+ DCHECK_NE(current_address_, 0u); // Use SetAddress for the first advance.
+ DCHECK_GE(absolute_address, current_address_);
+ if (absolute_address != current_address_) {
+ uint64_t delta = FactorCodeOffset(absolute_address - current_address_);
+ if (delta <= INT32_MAX) {
+ this->PushUint8(DW_LNS_advance_pc);
+ this->PushUleb128(static_cast<int>(delta));
+ current_address_ = absolute_address;
+ } else {
+ SetAddress(absolute_address);
+ }
+ }
+ }
+
+ void AdvanceLine(int absolute_line) {
+ int delta = absolute_line - current_line_;
+ if (delta != 0) {
+ this->PushUint8(DW_LNS_advance_line);
+ this->PushSleb128(delta);
+ current_line_ = absolute_line;
+ }
+ }
+
+ void SetFile(int file) {
+ if (current_file_ != file) {
+ this->PushUint8(DW_LNS_set_file);
+ this->PushUleb128(file);
+ current_file_ = file;
+ }
+ }
+
+ void SetColumn(int column) {
+ this->PushUint8(DW_LNS_set_column);
+ this->PushUleb128(column);
+ }
+
+ void NegateStmt() {
+ this->PushUint8(DW_LNS_negate_stmt);
+ }
+
+ void SetBasicBlock() {
+ this->PushUint8(DW_LNS_set_basic_block);
+ }
+
+ void SetPrologueEnd() {
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_LNS_set_prologue_end);
+ }
+
+ void SetEpilogueBegin() {
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_LNS_set_epilogue_begin);
+ }
+
+ void SetISA(int isa) {
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_LNS_set_isa);
+ this->PushUleb128(isa);
+ }
+
+ void EndSequence() {
+ this->PushUint8(0);
+ this->PushUleb128(1);
+ this->PushUint8(DW_LNE_end_sequence);
+ current_address_ = 0;
+ current_file_ = 1;
+ current_line_ = 1;
+ }
+
+ // Unconditionally set the address using the long encoding.
+ // This gives the linker the opportunity to relocate the address.
+ void SetAddress(uint64_t absolute_address) {
+ DCHECK_GE(absolute_address, current_address_);
+ FactorCodeOffset(absolute_address); // Check if it is factorable.
+ this->PushUint8(0);
+ if (use_64bit_address_) {
+ this->PushUleb128(1 + 8);
+ this->PushUint8(DW_LNE_set_address);
+ this->PushUint64(absolute_address);
+ } else {
+ this->PushUleb128(1 + 4);
+ this->PushUint8(DW_LNE_set_address);
+ this->PushUint32(absolute_address);
+ }
+ current_address_ = absolute_address;
+ }
+
+ void DefineFile(const char* filename,
+ int directory_index,
+ int modification_time,
+ int file_size) {
+ int size = 1 +
+ strlen(filename) + 1 +
+ UnsignedLeb128Size(directory_index) +
+ UnsignedLeb128Size(modification_time) +
+ UnsignedLeb128Size(file_size);
+ this->PushUint8(0);
+ this->PushUleb128(size);
+ size_t start = data()->size();
+ this->PushUint8(DW_LNE_define_file);
+ this->PushString(filename);
+ this->PushUleb128(directory_index);
+ this->PushUleb128(modification_time);
+ this->PushUleb128(file_size);
+ DCHECK_EQ(start + size, data()->size());
+ }
+
+ // Compact address and line opcode.
+ void AddRow(uint64_t absolute_address, int absolute_line) {
+ DCHECK_GE(absolute_address, current_address_);
+
+ // If the address is definitely too far, use the long encoding.
+ uint64_t delta_address = FactorCodeOffset(absolute_address - current_address_);
+ if (delta_address > UINT8_MAX) {
+ AdvancePC(absolute_address);
+ delta_address = 0;
+ }
+
+ // If the line is definitely too far, use the long encoding.
+ int delta_line = absolute_line - current_line_;
+ if (!(kLineBase <= delta_line && delta_line < kLineBase + kLineRange)) {
+ AdvanceLine(absolute_line);
+ delta_line = 0;
+ }
+
+ // Both address and line should be reasonable now. Use the short encoding.
+ int opcode = kOpcodeBase + (delta_line - kLineBase) +
+ (static_cast<int>(delta_address) * kLineRange);
+ if (opcode > UINT8_MAX) {
+ // If the address is still too far, try to increment it by a const amount.
+ int const_advance = (0xFF - kOpcodeBase) / kLineRange;
+ opcode -= (kLineRange * const_advance);
+ if (opcode <= UINT8_MAX) {
+ this->PushUint8(DW_LNS_const_add_pc);
+ } else {
+ // Give up and use long encoding for address.
+ AdvancePC(absolute_address);
+ // Still use the opcode to do line advance and copy.
+ opcode = kOpcodeBase + (delta_line - kLineBase);
+ }
+ }
+ DCHECK(kOpcodeBase <= opcode && opcode <= 0xFF);
+ this->PushUint8(opcode); // Special opcode.
+ current_line_ = absolute_line;
+ current_address_ = absolute_address;
+ }
+
+ int GetCodeFactorBits() const {
+ return code_factor_bits_;
+ }
+
+ uint64_t CurrentAddress() const {
+ return current_address_;
+ }
+
+ int CurrentFile() const {
+ return current_file_;
+ }
+
+ int CurrentLine() const {
+ return current_line_;
+ }
+
+ using Writer<Allocator>::data;
+
+ DebugLineOpCodeWriter(bool use64bitAddress,
+ int codeFactorBits,
+ const Allocator& alloc = Allocator())
+ : Writer<Allocator>(&opcodes_),
+ opcodes_(alloc),
+ uses_dwarf3_features_(false),
+ use_64bit_address_(use64bitAddress),
+ code_factor_bits_(codeFactorBits),
+ current_address_(0),
+ current_file_(1),
+ current_line_(1) {
+ }
+
+ private:
+ uint64_t FactorCodeOffset(uint64_t offset) const {
+ DCHECK_GE(code_factor_bits_, 0);
+ DCHECK_EQ((offset >> code_factor_bits_) << code_factor_bits_, offset);
+ return offset >> code_factor_bits_;
+ }
+
+ std::vector<uint8_t, Allocator> opcodes_;
+ bool uses_dwarf3_features_;
+ bool use_64bit_address_;
+ int code_factor_bits_;
+ uint64_t current_address_;
+ int current_file_;
+ int current_line_;
+
+ DISALLOW_COPY_AND_ASSIGN(DebugLineOpCodeWriter);
+};
+
+} // namespace dwarf
+} // namespace art
+
+#endif // ART_COMPILER_DWARF_DEBUG_LINE_OPCODE_WRITER_H_
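To make the special-opcode arithmetic in AddRow() concrete, a small worked example with the constants above (kOpcodeBase 13, kLineBase -5, kLineRange 14); the delta values are illustrative:

#include <cstdio>

int main() {
  const int kOpcodeBase = 13, kLineBase = -5, kLineRange = 14;
  // A row that advances the (factored) address by 2 and the line by +3
  // fits in one special-opcode byte.
  int delta_address = 2, delta_line = 3;
  int opcode = kOpcodeBase + (delta_line - kLineBase) + delta_address * kLineRange;
  std::printf("special opcode = %d\n", opcode);  // 13 + 8 + 28 = 49 <= 0xFF.
  return 0;
}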
diff --git a/compiler/dwarf/debug_line_writer.h b/compiler/dwarf/debug_line_writer.h
new file mode 100644
index 0000000000..4b7d8d9d92
--- /dev/null
+++ b/compiler/dwarf/debug_line_writer.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DWARF_DEBUG_LINE_WRITER_H_
+#define ART_COMPILER_DWARF_DEBUG_LINE_WRITER_H_
+
+#include "debug_line_opcode_writer.h"
+#include "dwarf.h"
+#include "writer.h"
+#include <string>
+
+namespace art {
+namespace dwarf {
+
+// Writer for the .debug_line section (DWARF-3).
+template<typename Allocator = std::allocator<uint8_t>>
+class DebugLineWriter FINAL : private Writer<Allocator> {
+ public:
+ struct FileEntry {
+ std::string file_name;
+ int directory_index;
+ int modification_time;
+ int file_size;
+ };
+
+ void WriteTable(const std::vector<std::string>& include_directories,
+ const std::vector<FileEntry>& files,
+ const DebugLineOpCodeWriter<Allocator>& opcodes) {
+ size_t header_start = this->data()->size();
+ this->PushUint32(0); // Section-length placeholder.
+ // Claim DWARF-2 version even though we use some DWARF-3 features.
+ // DWARF-2 consumers will ignore the unknown opcodes.
+ // This is what clang currently does.
+ this->PushUint16(2); // .debug_line version.
+ size_t header_length_pos = this->data()->size();
+ this->PushUint32(0); // Header-length placeholder.
+ this->PushUint8(1 << opcodes.GetCodeFactorBits());
+ this->PushUint8(DebugLineOpCodeWriter<Allocator>::kDefaultIsStmt ? 1 : 0);
+ this->PushInt8(DebugLineOpCodeWriter<Allocator>::kLineBase);
+ this->PushUint8(DebugLineOpCodeWriter<Allocator>::kLineRange);
+ this->PushUint8(DebugLineOpCodeWriter<Allocator>::kOpcodeBase);
+ static const int opcode_lengths[DebugLineOpCodeWriter<Allocator>::kOpcodeBase] = {
+ 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1 };
+ for (int i = 1; i < DebugLineOpCodeWriter<Allocator>::kOpcodeBase; i++) {
+ this->PushUint8(opcode_lengths[i]);
+ }
+ for (const std::string& directory : include_directories) {
+ this->PushData(directory.data(), directory.size() + 1);
+ }
+ this->PushUint8(0); // Terminate include_directories list.
+ for (const FileEntry& file : files) {
+ this->PushData(file.file_name.data(), file.file_name.size() + 1);
+ this->PushUleb128(file.directory_index);
+ this->PushUleb128(file.modification_time);
+ this->PushUleb128(file.file_size);
+ }
+ this->PushUint8(0); // Terminate file list.
+ this->UpdateUint32(header_length_pos, this->data()->size() - header_length_pos - 4);
+ this->PushData(opcodes.data()->data(), opcodes.data()->size());
+ this->UpdateUint32(header_start, this->data()->size() - header_start - 4);
+ }
+
+ explicit DebugLineWriter(std::vector<uint8_t, Allocator>* buffer)
+ : Writer<Allocator>(buffer) {
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DebugLineWriter);
+};
+
+} // namespace dwarf
+} // namespace art
+
+#endif // ART_COMPILER_DWARF_DEBUG_LINE_WRITER_H_
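A hedged sketch of how the two line-table writers compose: build the opcode stream first, then let WriteTable() wrap it with the header. The file name, addresses, and line numbers are illustrative:

#include <string>
#include <vector>
#include "dwarf/debug_line_writer.h"

void WriteLineTable(std::vector<uint8_t>* buffer) {
  // false: 32-bit addresses; 0: no code-factor shift. Both illustrative.
  art::dwarf::DebugLineOpCodeWriter<> opcodes(false, 0);
  opcodes.SetAddress(0x1000);            // Long encoding; relocatable.
  opcodes.AddRow(0x1000, 42);            // Special opcodes where possible.
  opcodes.AddRow(0x1010, 43);
  opcodes.EndSequence();
  std::vector<std::string> dirs;         // Empty include_directories list.
  std::vector<art::dwarf::DebugLineWriter<>::FileEntry> files = {
    { "main.cc", 0, 0, 0 },              // Hypothetical file entry (index 1).
  };
  art::dwarf::DebugLineWriter<> writer(buffer);
  writer.WriteTable(dirs, files, opcodes);
}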
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
new file mode 100644
index 0000000000..f3553bcc99
--- /dev/null
+++ b/compiler/dwarf/dwarf_test.cc
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dwarf_test.h"
+
+#include "dwarf/debug_frame_opcode_writer.h"
+#include "dwarf/debug_frame_writer.h"
+#include "dwarf/debug_line_opcode_writer.h"
+#include "dwarf/debug_line_writer.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace dwarf {
+
+// Run the tests only on host since we need objdump.
+#ifndef HAVE_ANDROID_OS
+
+TEST_F(DwarfTest, DebugFrame) {
+ const bool is64bit = false;
+
+ // Pick an offset value that will catch Uleb vs Sleb encoding errors.
+ const int offset = 40000;
+ ASSERT_EQ(UnsignedLeb128Size(offset / 4), 2u);
+ ASSERT_EQ(SignedLeb128Size(offset / 4), 3u);
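+ // 10000 fits in 14 bits, so ULEB128 encodes it in two 7-bit groups;
+ // SLEB128 also needs room for the sign bit and 10000 >= 2^13, so it
+ // spills into a third byte.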
+ DW_CHECK("Data alignment factor: -4");
+ const Reg reg(6);
+
+ // Test the opcodes in the order mentioned in the spec.
+ // There are usually several encoding variations of each opcode.
+ DebugFrameOpCodeWriter<> opcodes;
+ DW_CHECK("FDE");
+ int pc = 0;
+ for (int i : {0, 1, 0x3F, 0x40, 0xFF, 0x100, 0xFFFF, 0x10000}) {
+ pc += i;
+ opcodes.AdvancePC(pc);
+ }
+ DW_CHECK_NEXT("DW_CFA_advance_loc: 1 to 01000001");
+ DW_CHECK_NEXT("DW_CFA_advance_loc: 63 to 01000040");
+ DW_CHECK_NEXT("DW_CFA_advance_loc1: 64 to 01000080");
+ DW_CHECK_NEXT("DW_CFA_advance_loc1: 255 to 0100017f");
+ DW_CHECK_NEXT("DW_CFA_advance_loc2: 256 to 0100027f");
+ DW_CHECK_NEXT("DW_CFA_advance_loc2: 65535 to 0101027e");
+ DW_CHECK_NEXT("DW_CFA_advance_loc4: 65536 to 0102027e");
+ opcodes.DefCFA(reg, offset);
+ DW_CHECK_NEXT("DW_CFA_def_cfa: r6 (esi) ofs 40000");
+ opcodes.DefCFA(reg, -offset);
+ DW_CHECK_NEXT("DW_CFA_def_cfa_sf: r6 (esi) ofs -40000");
+ opcodes.DefCFARegister(reg);
+ DW_CHECK_NEXT("DW_CFA_def_cfa_register: r6 (esi)");
+ opcodes.DefCFAOffset(offset);
+ DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 40000");
+ opcodes.DefCFAOffset(-offset);
+ DW_CHECK_NEXT("DW_CFA_def_cfa_offset_sf: -40000");
+ uint8_t expr[] = { 0 };
+ opcodes.DefCFAExpression(expr, arraysize(expr));
+ DW_CHECK_NEXT("DW_CFA_def_cfa_expression");
+ opcodes.Undefined(reg);
+ DW_CHECK_NEXT("DW_CFA_undefined: r6 (esi)");
+ opcodes.SameValue(reg);
+ DW_CHECK_NEXT("DW_CFA_same_value: r6 (esi)");
+ opcodes.Offset(Reg(0x3F), -offset);
+ // objdump reports "bad register" because the register does not exist on x86,
+ // but we want to test high register numbers anyway.
+ DW_CHECK_NEXT("DW_CFA_offset: bad register: r63 at cfa-40000");
+ opcodes.Offset(Reg(0x40), -offset);
+ DW_CHECK_NEXT("DW_CFA_offset_extended: bad register: r64 at cfa-40000");
+ opcodes.Offset(Reg(0x40), offset);
+ DW_CHECK_NEXT("DW_CFA_offset_extended_sf: bad register: r64 at cfa+40000");
+ opcodes.ValOffset(reg, -offset);
+ DW_CHECK_NEXT("DW_CFA_val_offset: r6 (esi) at cfa-40000");
+ opcodes.ValOffset(reg, offset);
+ DW_CHECK_NEXT("DW_CFA_val_offset_sf: r6 (esi) at cfa+40000");
+ opcodes.Register(reg, Reg(1));
+ DW_CHECK_NEXT("DW_CFA_register: r6 (esi) in r1 (ecx)");
+ opcodes.Expression(reg, expr, arraysize(expr));
+ DW_CHECK_NEXT("DW_CFA_expression: r6 (esi)");
+ opcodes.ValExpression(reg, expr, arraysize(expr));
+ DW_CHECK_NEXT("DW_CFA_val_expression: r6 (esi)");
+ opcodes.Restore(Reg(0x3F));
+ DW_CHECK_NEXT("DW_CFA_restore: bad register: r63");
+ opcodes.Restore(Reg(0x40));
+ DW_CHECK_NEXT("DW_CFA_restore_extended: bad register: r64");
+ opcodes.Restore(reg);
+ DW_CHECK_NEXT("DW_CFA_restore: r6 (esi)");
+ opcodes.RememberState();
+ DW_CHECK_NEXT("DW_CFA_remember_state");
+ opcodes.RestoreState();
+ DW_CHECK_NEXT("DW_CFA_restore_state");
+ opcodes.Nop();
+ DW_CHECK_NEXT("DW_CFA_nop");
+
+ // Also test helpers.
+ opcodes.DefCFA(Reg(4), 100); // ESP
+ DW_CHECK_NEXT("DW_CFA_def_cfa: r4 (esp) ofs 100");
+ opcodes.AdjustCFAOffset(8);
+ DW_CHECK_NEXT("DW_CFA_def_cfa_offset: 108");
+ opcodes.RelOffset(Reg(0), 0); // push R0
+ DW_CHECK_NEXT("DW_CFA_offset: r0 (eax) at cfa-108");
+ opcodes.RelOffset(Reg(1), 4); // push R1
+ DW_CHECK_NEXT("DW_CFA_offset: r1 (ecx) at cfa-104");
+ opcodes.RelOffsetForMany(Reg(2), 8, 1 | (1 << 3), 4); // push R2 and R5
+ DW_CHECK_NEXT("DW_CFA_offset: r2 (edx) at cfa-100");
+ DW_CHECK_NEXT("DW_CFA_offset: r5 (ebp) at cfa-96");
+ opcodes.RestoreMany(Reg(2), 1 | (1 << 3)); // pop R2 and R5
+ DW_CHECK_NEXT("DW_CFA_restore: r2 (edx)");
+ DW_CHECK_NEXT("DW_CFA_restore: r5 (ebp)");
+
+ DebugFrameWriter<> eh_frame(&eh_frame_data_, is64bit);
+ DebugFrameOpCodeWriter<> initial_opcodes;
+ eh_frame.WriteCIE(Reg(is64bit ? 16 : 8), // Return address register.
+ initial_opcodes); // Initial opcodes.
+ eh_frame.WriteFDE(0x01000000, 0x01000000,
+ opcodes.data()->data(), opcodes.data()->size());
+ CheckObjdumpOutput(is64bit, "-W");
+}
+
+TEST_F(DwarfTest, DebugFrame64) {
+ const bool is64bit = true;
+ DebugFrameWriter<> eh_frame(&eh_frame_data_, is64bit);
+ DebugFrameOpCodeWriter<> no_opcodes;
+ eh_frame.WriteCIE(Reg(16), no_opcodes);
+ eh_frame.WriteFDE(0x0100000000000000, 0x0200000000000000,
+ no_opcodes.data()->data(), no_opcodes.data()->size());
+ DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000");
+ CheckObjdumpOutput(is64bit, "-W");
+}
+
+TEST_F(DwarfTest, DebugLine) {
+ const bool is64bit = false;
+ const int code_factor_bits = 1;
+ DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits);
+
+ std::vector<std::string> include_directories;
+ include_directories.push_back("/path/to/source");
+ DW_CHECK("/path/to/source");
+
+ std::vector<DebugLineWriter<>::FileEntry> files {
+ { "file0.c", 0, 1000, 2000 },
+ { "file1.c", 1, 1000, 2000 },
+ { "file2.c", 1, 1000, 2000 },
+ };
+ DW_CHECK("1\t0\t1000\t2000\tfile0.c");
+ DW_CHECK_NEXT("2\t1\t1000\t2000\tfile1.c");
+ DW_CHECK_NEXT("3\t1\t1000\t2000\tfile2.c");
+
+ DW_CHECK("Line Number Statements");
+ opcodes.SetAddress(0x01000000);
+ DW_CHECK_NEXT("Extended opcode 2: set Address to 0x1000000");
+ opcodes.AddRow();
+ DW_CHECK_NEXT("Copy");
+ opcodes.AdvancePC(0x01000100);
+ DW_CHECK_NEXT("Advance PC by 256 to 0x1000100");
+ opcodes.SetFile(2);
+ DW_CHECK_NEXT("Set File Name to entry 2 in the File Name Table");
+ opcodes.AdvanceLine(3);
+ DW_CHECK_NEXT("Advance Line by 2 to 3");
+ opcodes.SetColumn(4);
+ DW_CHECK_NEXT("Set column to 4");
+ opcodes.NegateStmt();
+ DW_CHECK_NEXT("Set is_stmt to 0");
+ opcodes.SetBasicBlock();
+ DW_CHECK_NEXT("Set basic block");
+ opcodes.SetPrologueEnd();
+ DW_CHECK_NEXT("Set prologue_end to true");
+ opcodes.SetEpilogueBegin();
+ DW_CHECK_NEXT("Set epilogue_begin to true");
+ opcodes.SetISA(5);
+ DW_CHECK_NEXT("Set ISA to 5");
+ opcodes.EndSequence();
+ DW_CHECK_NEXT("Extended opcode 1: End of Sequence");
+ opcodes.DefineFile("file.c", 0, 1000, 2000);
+ DW_CHECK_NEXT("Extended opcode 3: define new File Table entry");
+ DW_CHECK_NEXT("Entry\tDir\tTime\tSize\tName");
+ DW_CHECK_NEXT("1\t0\t1000\t2000\tfile.c");
+
+ DebugLineWriter<> debug_line(&debug_line_data_);
+ debug_line.WriteTable(include_directories, files, opcodes);
+ CheckObjdumpOutput(is64bit, "-W");
+}
+
+// DWARF has special one byte codes which advance PC and line at the same time.
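+// A special opcode packs both deltas into a single byte:
+//   opcode = (line_delta - kLineBase) + (kLineRange * addr_delta) + kOpcodeBase
+// (addr_delta measured in code units), so only small delta pairs get the
+// one-byte form; larger deltas fall back to the standard advance opcodes.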
+TEST_F(DwarfTest, DebugLineSpecialOpcodes) {
+ const bool is64bit = false;
+ const int code_factor_bits = 1;
+ uint32_t pc = 0x01000000;
+ int line = 1;
+ DebugLineOpCodeWriter<> opcodes(is64bit, code_factor_bits);
+ opcodes.SetAddress(pc);
+ size_t num_rows = 0;
+ DW_CHECK("Line Number Statements:");
+ DW_CHECK("Special opcode");
+ DW_CHECK("Advance PC by constant");
+ DW_CHECK("Decoded dump of debug contents of section .debug_line:");
+ DW_CHECK("Line number Starting address");
+ for (int addr_delta = 0; addr_delta < 80; addr_delta += 2) {
+ for (int line_delta = 16; line_delta >= -16; --line_delta) {
+ pc += addr_delta;
+ line += line_delta;
+ opcodes.AddRow(pc, line);
+ num_rows++;
+ ASSERT_EQ(opcodes.CurrentAddress(), pc);
+ ASSERT_EQ(opcodes.CurrentLine(), line);
+ char expected[1024];
+ sprintf(expected, "%i 0x%x", line, pc);
+ DW_CHECK_NEXT(expected);
+ }
+ }
+ EXPECT_LT(opcodes.data()->size(), num_rows * 3);
+
+ std::vector<std::string> directories;
+ std::vector<DebugLineWriter<>::FileEntry> files {
+ { "file.c", 0, 1000, 2000 },
+ };
+ DebugLineWriter<> debug_line(&debug_line_data_);
+ debug_line.WriteTable(directories, files, opcodes);
+ CheckObjdumpOutput(is64bit, "-W -WL");
+}
+
+#endif // HAVE_ANDROID_OS
+
+} // namespace dwarf
+} // namespace art
diff --git a/compiler/dwarf/dwarf_test.h b/compiler/dwarf/dwarf_test.h
new file mode 100644
index 0000000000..dd5e0c286e
--- /dev/null
+++ b/compiler/dwarf/dwarf_test.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DWARF_DWARF_TEST_H_
+#define ART_COMPILER_DWARF_DWARF_TEST_H_
+
+#include <cstring>
+#include <dirent.h>
+#include <memory>
+#include <set>
+#include <stdio.h>
+#include <string>
+#include <sys/types.h>
+
+#include "utils.h"
+#include "base/unix_file/fd_file.h"
+#include "common_runtime_test.h"
+#include "elf_builder.h"
+#include "gtest/gtest.h"
+#include "os.h"
+
+namespace art {
+namespace dwarf {
+
+#define DW_CHECK(substring) Check(substring, false, __FILE__, __LINE__)
+#define DW_CHECK_NEXT(substring) Check(substring, true, __FILE__, __LINE__)
+
+class DwarfTest : public CommonRuntimeTest {
+ public:
+ static constexpr bool kPrintObjdumpOutput = false; // debugging.
+
+ struct ExpectedLine {
+ std::string substring;
+ bool next;
+ const char* at_file;
+ int at_line;
+ };
+
+ // Check that the objdump output contains the given substring.
+ // If next is true, it must be the next line. Otherwise lines are skipped.
+ void Check(const char* substr, bool next, const char* at_file, int at_line) {
+ expected_lines_.push_back(ExpectedLine {substr, next, at_file, at_line});
+ }
+
+ static std::string GetObjdumpPath() {
+ const char* android_build_top = getenv("ANDROID_BUILD_TOP");
+ if (android_build_top != nullptr) {
+ std::string host_prebuilts = std::string(android_build_top) +
+ "/prebuilts/gcc/linux-x86/host/";
+ // Read the content of the directory.
+ std::set<std::string> entries;
+ DIR* dir = opendir(host_prebuilts.c_str());
+ if (dir != nullptr) {
+ struct dirent* entry;
+ while ((entry = readdir(dir)) != nullptr) {
+ if (strstr(entry->d_name, "linux-glibc")) {
+ entries.insert(host_prebuilts + entry->d_name);
+ }
+ }
+ closedir(dir);
+ }
+ // Strings are sorted so the last one should be the most recent version.
+ if (!entries.empty()) {
+ std::string path = *entries.rbegin() + "/x86_64-linux/bin/objdump";
+ struct stat st;
+ if (stat(path.c_str(), &st) == 0) {
+ return path; // File exists.
+ }
+ }
+ }
+ ADD_FAILURE() << "Can not find prebuild objdump.";
+ return "objdump"; // Use the system objdump as fallback.
+ }
+
+ // Pretty-print the generated DWARF data using objdump.
+ template<typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Dyn,
+ typename Elf_Sym, typename Elf_Ehdr, typename Elf_Phdr, typename Elf_Shdr>
+ std::vector<std::string> Objdump(bool is64bit, const char* args) {
+ // Write a simple ELF file with just the DWARF sections.
+ class NoCode : public CodeOutput {
+ virtual void SetCodeOffset(size_t) { }
+ virtual bool Write(OutputStream*) { return true; }
+ } code;
+ ScratchFile file;
+ InstructionSet isa = is64bit ? kX86_64 : kX86;
+ ElfBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+ Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr> builder(
+ &code, file.GetFile(), isa, 0, 0, 0, 0, 0, 0, false, false);
+ typedef ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> Section;
+ if (!debug_info_data_.empty()) {
+ Section debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ debug_info.SetBuffer(debug_info_data_);
+ builder.RegisterRawSection(debug_info);
+ }
+ if (!debug_abbrev_data_.empty()) {
+ Section debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ debug_abbrev.SetBuffer(debug_abbrev_data_);
+ builder.RegisterRawSection(debug_abbrev);
+ }
+ if (!debug_str_data_.empty()) {
+ Section debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ debug_str.SetBuffer(debug_str_data_);
+ builder.RegisterRawSection(debug_str);
+ }
+ if (!debug_line_data_.empty()) {
+ Section debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ debug_line.SetBuffer(debug_line_data_);
+ builder.RegisterRawSection(debug_line);
+ }
+ if (!eh_frame_data_.empty()) {
+ Section eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+ eh_frame.SetBuffer(eh_frame_data_);
+ builder.RegisterRawSection(eh_frame);
+ }
+ builder.Init();
+ builder.Write();
+
+ // Read the ELF file back using objdump.
+ std::vector<std::string> lines;
+ std::string cmd = GetObjdumpPath();
+ cmd = cmd + " " + args + " " + file.GetFilename() + " 2>&1";
+ FILE* output = popen(cmd.data(), "r");
+ char buffer[1024];
+ const char* line;
+ while ((line = fgets(buffer, sizeof(buffer), output)) != nullptr) {
+ if (kPrintObjdumpOutput) {
+ printf("%s", line);
+ }
+ if (line[0] != '\0' && line[0] != '\n') {
+ EXPECT_TRUE(strstr(line, "objdump: Error:") == nullptr) << line;
+ EXPECT_TRUE(strstr(line, "objdump: Warning:") == nullptr) << line;
+ std::string str(line);
+ if (str.back() == '\n') {
+ str.pop_back();
+ }
+ lines.push_back(str);
+ }
+ }
+ pclose(output);
+ return lines;
+ }
+
+ std::vector<std::string> Objdump(bool is64bit, const char* args) {
+ if (is64bit) {
+ return Objdump<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
+ Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr>(is64bit, args);
+ } else {
+ return Objdump<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
+ Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>(is64bit, args);
+ }
+ }
+
+ // Compare objdump output to the recorded checks.
+ void CheckObjdumpOutput(bool is64bit, const char* args) {
+ std::vector<std::string> actual_lines = Objdump(is64bit, args);
+ auto actual_line = actual_lines.begin();
+ for (const ExpectedLine& expected_line : expected_lines_) {
+ const std::string& substring = expected_line.substring;
+ if (actual_line == actual_lines.end()) {
+ ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) <<
+ "Expected '" << substring << "'.\n" <<
+ "Seen end of output.";
+ } else if (expected_line.next) {
+ if (actual_line->find(substring) == std::string::npos) {
+ ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) <<
+ "Expected '" << substring << "'.\n" <<
+ "Seen '" << actual_line->data() << "'.";
+ } else {
+ // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data());
+ }
+ actual_line++;
+ } else {
+ bool found = false;
+ for (auto it = actual_line; it < actual_lines.end(); it++) {
+ if (it->find(substring) != std::string::npos) {
+ actual_line = it;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ ADD_FAILURE_AT(expected_line.at_file, expected_line.at_line) <<
+ "Expected '" << substring << "'.\n" <<
+ "Not found anywhere in the rest of the output.";
+ } else {
+ // printf("Found '%s' in '%s'.\n", substring.data(), actual_line->data());
+ actual_line++;
+ }
+ }
+ }
+ }
+
+ // Buffers which are going to be assembled into an ELF file and passed to objdump.
+ std::vector<uint8_t> eh_frame_data_;
+ std::vector<uint8_t> debug_info_data_;
+ std::vector<uint8_t> debug_abbrev_data_;
+ std::vector<uint8_t> debug_str_data_;
+ std::vector<uint8_t> debug_line_data_;
+
+ // The expected output of objdump.
+ std::vector<ExpectedLine> expected_lines_;
+};
+
+} // namespace dwarf
+} // namespace art
+
+#endif // ART_COMPILER_DWARF_DWARF_TEST_H_
diff --git a/compiler/dwarf/register.h b/compiler/dwarf/register.h
new file mode 100644
index 0000000000..fa666dffa9
--- /dev/null
+++ b/compiler/dwarf/register.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DWARF_REGISTER_H_
+#define ART_COMPILER_DWARF_REGISTER_H_
+
+namespace art {
+namespace dwarf {
+
+// Represents a DWARF register.
+class Reg {
+ public:
+ explicit Reg(int reg_num) : num_(reg_num) { }
+ int num() const { return num_; }
+
+ // TODO: Arm S0–S31 register mapping is obsolescent.
+ // We should use VFP-v3/Neon D0-D31 mapping instead.
+ // However, D0 is aliased to the pair of S0 and S1, so using that
+ // mapping we cannot easily say that S0 is spilled and S1 is not.
+ // There are ways around this in DWARF but they are complex.
+ // It would be much simpler to always spill whole D registers.
+ // The Arm64 mapping is correct since we already do this there.
+
+ static Reg ArmCore(int num) { return Reg(num); }
+ static Reg ArmFp(int num) { return Reg(64 + num); } // S0–S31.
+ static Reg Arm64Core(int num) { return Reg(num); }
+ static Reg Arm64Fp(int num) { return Reg(64 + num); } // V0-V31.
+ static Reg MipsCore(int num) { return Reg(num); }
+ static Reg Mips64Core(int num) { return Reg(num); }
+ static Reg X86Core(int num) { return Reg(num); }
+ static Reg X86Fp(int num) { return Reg(21 + num); }
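+ // Remaps the machine-encoding order (RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI)
+ // to the DWARF numbering of the x86-64 psABI (RAX, RDX, RCX, RBX, RSI, RDI, RBP, RSP).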
+ static Reg X86_64Core(int num) {
+ static const int map[8] = {0, 2, 1, 3, 7, 6, 4, 5};
+ return Reg(num < 8 ? map[num] : num);
+ }
+ static Reg X86_64Fp(int num) { return Reg(17 + num); }
+
+ private:
+ int num_;
+};
+
+} // namespace dwarf
+} // namespace art
+
+#endif // ART_COMPILER_DWARF_REGISTER_H_
diff --git a/compiler/dwarf/writer.h b/compiler/dwarf/writer.h
new file mode 100644
index 0000000000..d8e29f0986
--- /dev/null
+++ b/compiler/dwarf/writer.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DWARF_WRITER_H_
+#define ART_COMPILER_DWARF_WRITER_H_
+
+#include <vector>
+#include "leb128.h"
+#include "base/logging.h"
+#include "utils.h"
+
+namespace art {
+namespace dwarf {
+
+// The base class for all DWARF writers.
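+// All multi-byte values are emitted in little-endian byte order, matching
+// the little-endian targets ART generates code for.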
+template<typename Allocator = std::allocator<uint8_t>>
+class Writer {
+ public:
+ void PushUint8(int value) {
+ DCHECK_GE(value, 0);
+ DCHECK_LE(value, UINT8_MAX);
+ data_->push_back(value & 0xff);
+ }
+
+ void PushUint16(int value) {
+ DCHECK_GE(value, 0);
+ DCHECK_LE(value, UINT16_MAX);
+ data_->push_back((value >> 0) & 0xff);
+ data_->push_back((value >> 8) & 0xff);
+ }
+
+ void PushUint32(uint32_t value) {
+ data_->push_back((value >> 0) & 0xff);
+ data_->push_back((value >> 8) & 0xff);
+ data_->push_back((value >> 16) & 0xff);
+ data_->push_back((value >> 24) & 0xff);
+ }
+
+ void PushUint32(int value) {
+ DCHECK_GE(value, 0);
+ PushUint32(static_cast<uint32_t>(value));
+ }
+
+ void PushUint32(uint64_t value) {
+ DCHECK_LE(value, UINT32_MAX);
+ PushUint32(static_cast<uint32_t>(value));
+ }
+
+ void PushUint64(uint64_t value) {
+ data_->push_back((value >> 0) & 0xff);
+ data_->push_back((value >> 8) & 0xff);
+ data_->push_back((value >> 16) & 0xff);
+ data_->push_back((value >> 24) & 0xff);
+ data_->push_back((value >> 32) & 0xff);
+ data_->push_back((value >> 40) & 0xff);
+ data_->push_back((value >> 48) & 0xff);
+ data_->push_back((value >> 56) & 0xff);
+ }
+
+ void PushInt8(int value) {
+ DCHECK_GE(value, INT8_MIN);
+ DCHECK_LE(value, INT8_MAX);
+ PushUint8(static_cast<uint8_t>(value));
+ }
+
+ void PushInt16(int value) {
+ DCHECK_GE(value, INT16_MIN);
+ DCHECK_LE(value, INT16_MAX);
+ PushUint16(static_cast<uint16_t>(value));
+ }
+
+ void PushInt32(int value) {
+ PushUint32(static_cast<uint32_t>(value));
+ }
+
+ void PushInt64(int64_t value) {
+ PushUint64(static_cast<uint64_t>(value));
+ }
+
+ // Variable-length encoders.
+
+ void PushUleb128(uint32_t value) {
+ EncodeUnsignedLeb128(data_, value);
+ }
+
+ void PushUleb128(int value) {
+ DCHECK_GE(value, 0);
+ EncodeUnsignedLeb128(data_, value);
+ }
+
+ void PushSleb128(int value) {
+ EncodeSignedLeb128(data_, value);
+ }
+
+ // Miscellaneous functions.
+
+ void PushString(const char* value) {
+ data_->insert(data_->end(), value, value + strlen(value) + 1);
+ }
+
+ void PushData(const void* ptr, size_t size) {
+ const char* p = reinterpret_cast<const char*>(ptr);
+ data_->insert(data_->end(), p, p + size);
+ }
+
+ void UpdateUint32(size_t offset, uint32_t value) {
+ DCHECK_LT(offset + 3, data_->size());
+ (*data_)[offset + 0] = (value >> 0) & 0xFF;
+ (*data_)[offset + 1] = (value >> 8) & 0xFF;
+ (*data_)[offset + 2] = (value >> 16) & 0xFF;
+ (*data_)[offset + 3] = (value >> 24) & 0xFF;
+ }
+
+ void UpdateUint64(size_t offset, uint64_t value) {
+ DCHECK_LT(offset + 7, data_->size());
+ (*data_)[offset + 0] = (value >> 0) & 0xFF;
+ (*data_)[offset + 1] = (value >> 8) & 0xFF;
+ (*data_)[offset + 2] = (value >> 16) & 0xFF;
+ (*data_)[offset + 3] = (value >> 24) & 0xFF;
+ (*data_)[offset + 4] = (value >> 32) & 0xFF;
+ (*data_)[offset + 5] = (value >> 40) & 0xFF;
+ (*data_)[offset + 6] = (value >> 48) & 0xFF;
+ (*data_)[offset + 7] = (value >> 56) & 0xFF;
+ }
+
+ void Pad(int alignment) {
+ DCHECK_NE(alignment, 0);
+ data_->resize(RoundUp(data_->size(), alignment), 0);
+ }
+
+ const std::vector<uint8_t, Allocator>* data() const {
+ return data_;
+ }
+
+ explicit Writer(std::vector<uint8_t, Allocator>* buffer) : data_(buffer) { }
+
+ private:
+ std::vector<uint8_t, Allocator>* data_;
+
+ DISALLOW_COPY_AND_ASSIGN(Writer);
+};
+
+} // namespace dwarf
+} // namespace art
+
+#endif // ART_COMPILER_DWARF_WRITER_H_
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index fd3a9121ae..8e2d175af9 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -46,11 +46,7 @@ class ElfWriterTest : public CommonCompilerTest {
EXPECT_EQ(expected_value, ef->FindDynamicSymbolAddress(symbol_name)); \
} while (false)
-#if defined(ART_USE_OPTIMIZING_COMPILER)
-TEST_F(ElfWriterTest, DISABLED_dlsym) {
-#else
TEST_F(ElfWriterTest, dlsym) {
-#endif
std::string elf_location = GetCoreOatLocation();
std::string elf_filename = GetSystemImageFilename(elf_location.c_str(), kRuntimeISA);
LOG(INFO) << "elf_filename=" << elf_filename;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 8b311542f6..df5d5cca3b 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -89,7 +89,8 @@ JitCompiler::JitCompiler() : total_time_(0) {
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
- method_inliner_map_.get()));
+ method_inliner_map_.get(),
+ CompilerCallbacks::CallbackMode::kCompileApp));
compiler_driver_.reset(new CompilerDriver(
compiler_options_.get(), verification_results_.get(), method_inliner_map_.get(),
Compiler::kQuick, instruction_set, instruction_set_features_.get(), false,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 728da277ce..503068c734 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -88,8 +88,6 @@ TEST_F(OatTest, WriteRead) {
compiler_options_.reset(new CompilerOptions);
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
- callbacks_.reset(new QuickCompilerCallbacks(verification_results_.get(),
- method_inliner_map_.get()));
timer_.reset(new CumulativeLogger("Compilation times"));
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index e9ca042f1d..be432c5a20 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -59,7 +59,8 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
return new (allocator) HGreaterThan(lhs, rhs);
} else if (cond->IsGreaterThan()) {
return new (allocator) HLessThanOrEqual(lhs, rhs);
- } else if (cond->IsGreaterThanOrEqual()) {
+ } else {
+ DCHECK(cond->IsGreaterThanOrEqual());
return new (allocator) HLessThan(lhs, rhs);
}
} else if (cond->IsIntConstant()) {
@@ -70,10 +71,11 @@ static HInstruction* GetOppositeCondition(HInstruction* cond) {
DCHECK(int_const->IsOne());
return graph->GetIntConstant(0);
}
+ } else {
+ // General case: 'cond' is some other boolean-typed instruction.
+ // Negate it by comparing with zero ('cond == 0').
+ return new (allocator) HEqual(cond, graph->GetIntConstant(0));
}
-
- LOG(FATAL) << "Instruction " << cond->DebugName() << " used as a condition";
- UNREACHABLE();
}
void HBooleanSimplifier::Run() {
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 0f79d189be..1f95041a92 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -2087,16 +2087,32 @@ void InstructionCodeGeneratorARM::VisitMul(HMul* mul) {
}
void LocationsBuilderARM::VisitDiv(HDiv* div) {
- LocationSummary::CallKind call_kind = div->GetResultType() == Primitive::kPrimLong
- ? LocationSummary::kCall
- : LocationSummary::kNoCall;
+ LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
+ if (div->GetResultType() == Primitive::kPrimLong) {
+ // pLdiv runtime call.
+ call_kind = LocationSummary::kCall;
+ } else if (div->GetResultType() == Primitive::kPrimInt &&
+ !codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ // pIdivmod runtime call.
+ call_kind = LocationSummary::kCall;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(div, call_kind);
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // we only need the former.
+ locations->SetOut(Location::RegisterLocation(R0));
+ }
break;
}
case Primitive::kPrimLong: {
@@ -2129,9 +2145,18 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
switch (div->GetResultType()) {
case Primitive::kPrimInt: {
- __ sdiv(out.AsRegister<Register>(),
- first.AsRegister<Register>(),
- second.AsRegister<Register>());
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ __ sdiv(out.AsRegister<Register>(),
+ first.AsRegister<Register>(),
+ second.AsRegister<Register>());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
+ DCHECK_EQ(R0, out.AsRegister<Register>());
+
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), div, div->GetDexPc(), nullptr);
+ }
break;
}
@@ -2169,17 +2194,32 @@ void InstructionCodeGeneratorARM::VisitDiv(HDiv* div) {
void LocationsBuilderARM::VisitRem(HRem* rem) {
Primitive::Type type = rem->GetResultType();
- LocationSummary::CallKind call_kind = type == Primitive::kPrimInt
- ? LocationSummary::kNoCall
- : LocationSummary::kCall;
+
+ // Most remainders are implemented in the runtime.
+ LocationSummary::CallKind call_kind = LocationSummary::kCall;
+ if (rem->GetResultType() == Primitive::kPrimInt &&
+ codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ // Have hardware divide instruction for int, do it with three instructions.
+ call_kind = LocationSummary::kNoCall;
+ }
+
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
switch (type) {
case Primitive::kPrimInt: {
- locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
- locations->AddTemp(Location::RequiresRegister());
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+ locations->AddTemp(Location::RequiresRegister());
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+ // we only need the latter.
+ locations->SetOut(Location::RegisterLocation(R1));
+ }
break;
}
case Primitive::kPrimLong: {
@@ -2224,16 +2264,25 @@ void InstructionCodeGeneratorARM::VisitRem(HRem* rem) {
Primitive::Type type = rem->GetResultType();
switch (type) {
case Primitive::kPrimInt: {
- Register reg1 = first.AsRegister<Register>();
- Register reg2 = second.AsRegister<Register>();
- Register temp = locations->GetTemp(0).AsRegister<Register>();
+ if (codegen_->GetInstructionSetFeatures().HasDivideInstruction()) {
+ Register reg1 = first.AsRegister<Register>();
+ Register reg2 = second.AsRegister<Register>();
+ Register temp = locations->GetTemp(0).AsRegister<Register>();
+
+ // temp = reg1 / reg2 (integer division)
+ // temp = temp * reg2
+ // dest = reg1 - temp
+ __ sdiv(temp, reg1, reg2);
+ __ mul(temp, temp, reg2);
+ __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
+ } else {
+ InvokeRuntimeCallingConvention calling_convention;
+ DCHECK_EQ(calling_convention.GetRegisterAt(0), first.AsRegister<Register>());
+ DCHECK_EQ(calling_convention.GetRegisterAt(1), second.AsRegister<Register>());
+ DCHECK_EQ(R1, out.AsRegister<Register>());
- // temp = reg1 / reg2 (integer division)
- // temp = temp * reg2
- // dest = reg1 - temp
- __ sdiv(temp, reg1, reg2);
- __ mul(temp, temp, reg2);
- __ sub(out.AsRegister<Register>(), reg1, ShifterOperand(temp));
+ codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pIdivmod), rem, rem->GetDexPc(), nullptr);
+ }
break;
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8d0ca0beb9..0d5fe49c1d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3146,7 +3146,7 @@ void InstructionCodeGeneratorX86::GenerateExplicitNullCheck(HNullCheck* instruct
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.AsRegister<Register>(), Immediate(0));
+ __ testl(obj.AsRegister<Register>(), obj.AsRegister<Register>());
} else if (obj.IsStackSlot()) {
__ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
} else {
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 4b990f1ddd..2c17a67867 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -49,7 +49,8 @@ void HInliner::Run() {
for (HInstruction* instruction = block->GetFirstInstruction(); instruction != nullptr;) {
HInstruction* next = instruction->GetNext();
HInvokeStaticOrDirect* call = instruction->AsInvokeStaticOrDirect();
- if (call != nullptr) {
+ // As long as the call is not intrinsified, it is worth trying to inline.
+ if (call != nullptr && call->GetIntrinsic() == Intrinsics::kNone) {
// We use the original invoke type to ensure the resolution of the called method
// works properly.
if (!TryInline(call, call->GetDexMethodIndex(), call->GetOriginalInvokeType())) {
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 36cf8568e5..628a844cc7 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -191,8 +191,10 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
case kIntrinsicCompareTo:
return Intrinsics::kStringCompareTo;
case kIntrinsicIsEmptyOrLength:
- return ((method.d.data & kIntrinsicFlagIsEmpty) == 0) ?
- Intrinsics::kStringLength : Intrinsics::kStringIsEmpty;
+ // The inliner can handle these two cases - and this is the preferred approach
+ // since after inlining the call is no longer visible (as opposed to waiting
+ // until codegen to handle intrinsic).
+ return Intrinsics::kNone;
case kIntrinsicIndexOf:
return ((method.d.data & kIntrinsicFlagBase0) == 0) ?
Intrinsics::kStringIndexOfAfter : Intrinsics::kStringIndexOf;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 3fb6a7d2d0..33176f009c 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -862,7 +862,7 @@ void IntrinsicCodeGeneratorARM::VisitStringCompareTo(HInvoke* invoke) {
ArmAssembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
- // Note that the null check must have be done earlier.
+ // Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheck());
Register argument = locations->InAt(1).AsRegister<Register>();
@@ -903,8 +903,6 @@ UNIMPLEMENTED_INTRINSIC(MathRoundDouble) // Could be done by changing rounding
UNIMPLEMENTED_INTRINSIC(MathRoundFloat) // Could be done by changing rounding mode, maybe?
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong) // High register pressure.
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(StringIsEmpty) // Might not want to do these two anyways, inlining should
-UNIMPLEMENTED_INTRINSIC(StringLength) // be good enough here.
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 04e8fdcbe4..72d303c870 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1006,7 +1006,7 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
vixl::MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
- // Note that the null check must have be done earlier.
+ // Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheck());
Register argument = WRegisterFrom(locations->InAt(1));
@@ -1030,8 +1030,6 @@ void IntrinsicCodeGeneratorARM64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED
}
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
-UNIMPLEMENTED_INTRINSIC(StringIsEmpty) // Might not want to do these two anyways, inlining should
-UNIMPLEMENTED_INTRINSIC(StringLength) // be good enough here.
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 9cc77c6251..10f6e1d6c7 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -60,10 +60,8 @@
V(MemoryPokeShortNative, kStatic) \
V(StringCharAt, kDirect) \
V(StringCompareTo, kDirect) \
- V(StringIsEmpty, kDirect) \
V(StringIndexOf, kDirect) \
V(StringIndexOfAfter, kDirect) \
- V(StringLength, kDirect) \
V(UnsafeCASInt, kDirect) \
V(UnsafeCASLong, kDirect) \
V(UnsafeCASObject, kDirect) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 74edf50971..384737f55a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -790,7 +790,7 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
X86Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
- // Note that the null check must have be done earlier.
+ // Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheck());
Register argument = locations->InAt(1).AsRegister<Register>();
@@ -1196,8 +1196,6 @@ UNIMPLEMENTED_INTRINSIC(MathCeil)
UNIMPLEMENTED_INTRINSIC(MathRint)
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(StringIsEmpty) // Might not want to do these two anyways, inlining should
-UNIMPLEMENTED_INTRINSIC(StringLength) // be good enough here.
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index e4f2518a16..736cea88cb 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -681,7 +681,7 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
X86_64Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
- // Note that the null check must have be done earlier.
+ // Note that the null check must have been done earlier.
DCHECK(!invoke->CanDoImplicitNullCheck());
CpuRegister argument = locations->InAt(1).AsRegister<CpuRegister>();
@@ -1014,8 +1014,6 @@ UNIMPLEMENTED_INTRINSIC(MathCeil)
UNIMPLEMENTED_INTRINSIC(MathRint)
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(StringIsEmpty) // Might not want to do these two anyways, inlining should
-UNIMPLEMENTED_INTRINSIC(StringLength) // be good enough here.
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c0df02b5cc..e474c49121 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -585,8 +585,13 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
if (method != nullptr) {
return method;
}
- return delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
- class_loader, dex_file);
+ method = delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+
+ if (method != nullptr) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledQuick);
+ }
+ return method;
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 22ec2a5167..b97a66719d 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -28,6 +28,7 @@ enum MethodCompilationStat {
kAttemptCompilation = 0,
kCompiledBaseline,
kCompiledOptimized,
+ kCompiledQuick,
kInlinedInvoke,
kNotCompiledUnsupportedIsa,
kNotCompiledPathological,
@@ -65,16 +66,22 @@ class OptimizingCompilerStats {
compile_stats_[kCompiledBaseline] * 100 / compile_stats_[kAttemptCompilation];
size_t optimized_percent =
compile_stats_[kCompiledOptimized] * 100 / compile_stats_[kAttemptCompilation];
+ size_t quick_percent =
+ compile_stats_[kCompiledQuick] * 100 / compile_stats_[kAttemptCompilation];
std::ostringstream oss;
- oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: "
- << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, "
- << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized.";
+ oss << "Attempted compilation of " << compile_stats_[kAttemptCompilation] << " methods: ";
+
+ oss << unoptimized_percent << "% (" << compile_stats_[kCompiledBaseline] << ") unoptimized, ";
+ oss << optimized_percent << "% (" << compile_stats_[kCompiledOptimized] << ") optimized, ";
+ oss << quick_percent << "% (" << compile_stats_[kCompiledQuick] << ") quick.";
+
+ LOG(INFO) << oss.str();
+
for (int i = 0; i < kLastStat; i++) {
if (compile_stats_[i] != 0) {
- oss << "\n" << PrintMethodCompilationStat(i) << ": " << compile_stats_[i];
+ VLOG(compiler) << PrintMethodCompilationStat(i) << ": " << compile_stats_[i];
}
}
- LOG(INFO) << oss.str();
}
}
@@ -84,6 +91,7 @@ class OptimizingCompilerStats {
case kAttemptCompilation : return "kAttemptCompilation";
case kCompiledBaseline : return "kCompiledBaseline";
case kCompiledOptimized : return "kCompiledOptimized";
+ case kCompiledQuick : return "kCompiledQuick";
case kInlinedInvoke : return "kInlinedInvoke";
case kNotCompiledUnsupportedIsa : return "kNotCompiledUnsupportedIsa";
case kNotCompiledPathological : return "kNotCompiledPathological";
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 7d0641ec13..9df8f5640d 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include <iostream>
#include "parallel_move_resolver.h"
#include "nodes.h"
@@ -63,39 +64,42 @@ void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) {
}
}
+Location LowOf(Location location) {
+ if (location.IsRegisterPair()) {
+ return Location::RegisterLocation(location.low());
+ } else if (location.IsFpuRegisterPair()) {
+ return Location::FpuRegisterLocation(location.low());
+ } else if (location.IsDoubleStackSlot()) {
+ return Location::StackSlot(location.GetStackIndex());
+ } else {
+ return Location::NoLocation();
+ }
+}
+
+Location HighOf(Location location) {
+ if (location.IsRegisterPair()) {
+ return Location::RegisterLocation(location.high());
+ } else if (location.IsFpuRegisterPair()) {
+ return Location::FpuRegisterLocation(location.high());
+ } else if (location.IsDoubleStackSlot()) {
+ return Location::StackSlot(location.GetHighStackIndex(4));
+ } else {
+ return Location::NoLocation();
+ }
+}
+
// Update the source of `move`, knowing that `updated_location` has been swapped
// with `new_source`. Note that `updated_location` can be a pair, therefore if
// `move` is non-pair, we need to extract which register to use.
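+// For example, if the pair (4,5) was swapped with a double stack slot, a
+// pending move whose source is register 5 must be redirected to the slot's
+// high half.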
static void UpdateSourceOf(MoveOperands* move, Location updated_location, Location new_source) {
Location source = move->GetSource();
- if (new_source.GetKind() == source.GetKind()) {
- DCHECK(updated_location.Equals(source));
- move->SetSource(new_source);
- } else if (new_source.IsStackSlot()
- || new_source.IsDoubleStackSlot()
- || source.IsStackSlot()
- || source.IsDoubleStackSlot()) {
- // Stack slots never take part of a pair/non-pair swap.
- DCHECK(updated_location.Equals(source));
+ if (LowOf(updated_location).Equals(source)) {
+ move->SetSource(LowOf(new_source));
+ } else if (HighOf(updated_location).Equals(source)) {
+ move->SetSource(HighOf(new_source));
+ } else {
+ DCHECK(updated_location.Equals(source)) << updated_location << " " << source;
move->SetSource(new_source);
- } else if (source.IsRegister()) {
- DCHECK(new_source.IsRegisterPair()) << new_source;
- DCHECK(updated_location.IsRegisterPair()) << updated_location;
- if (updated_location.low() == source.reg()) {
- move->SetSource(Location::RegisterLocation(new_source.low()));
- } else {
- DCHECK_EQ(updated_location.high(), source.reg());
- move->SetSource(Location::RegisterLocation(new_source.high()));
- }
- } else if (source.IsFpuRegister()) {
- DCHECK(new_source.IsFpuRegisterPair()) << new_source;
- DCHECK(updated_location.IsFpuRegisterPair()) << updated_location;
- if (updated_location.low() == source.reg()) {
- move->SetSource(Location::FpuRegisterLocation(new_source.low()));
- } else {
- DCHECK_EQ(updated_location.high(), source.reg());
- move->SetSource(Location::FpuRegisterLocation(new_source.high()));
- }
}
}
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 817a44b184..5c502f7ef4 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -31,8 +31,13 @@ class TestParallelMoveResolver : public ParallelMoveResolver {
message_ << "C";
} else if (location.IsPair()) {
message_ << location.low() << "," << location.high();
- } else {
+ } else if (location.IsRegister()) {
message_ << location.reg();
+ } else if (location.IsStackSlot()) {
+ message_ << location.GetStackIndex() << "(sp)";
+ } else {
+ message_ << "2x" << location.GetStackIndex() << "(sp)";
+ DCHECK(location.IsDoubleStackSlot()) << location;
}
}
@@ -279,6 +284,26 @@ TEST(ParallelMoveTest, Pairs) {
resolver.EmitNativeCode(moves);
ASSERT_STREQ("(0,1 <-> 2,3)", resolver.GetMessage().c_str());
}
+
+ {
+ // Test involving registers used in single context and pair context.
+ TestParallelMoveResolver resolver(&allocator);
+ HParallelMove* moves = new (&allocator) HParallelMove(&allocator);
+ moves->AddMove(
+ Location::RegisterLocation(10),
+ Location::RegisterLocation(5),
+ nullptr);
+ moves->AddMove(
+ Location::RegisterPairLocation(4, 5),
+ Location::DoubleStackSlot(32),
+ nullptr);
+ moves->AddMove(
+ Location::DoubleStackSlot(32),
+ Location::RegisterPairLocation(10, 11),
+ nullptr);
+ resolver.EmitNativeCode(moves);
+ ASSERT_STREQ("(2x32(sp) <-> 10,11) (4,5 <-> 2x32(sp)) (4 -> 5)", resolver.GetMessage().c_str());
+ }
}
} // namespace art
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index cecc210cbf..cf38bd3f8c 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -213,7 +213,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
LiveInterval* interval =
LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
temp_intervals_.Add(interval);
- interval->AddRange(position, position + 1);
+ interval->AddTempUse(instruction, i);
unhandled_core_intervals_.Add(interval);
break;
}
@@ -222,7 +222,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
LiveInterval* interval =
LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
temp_intervals_.Add(interval);
- interval->AddRange(position, position + 1);
+ interval->AddTempUse(instruction, i);
if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
interval->AddHighInterval(true);
LiveInterval* high = interval->GetHighInterval();
@@ -851,6 +851,23 @@ bool RegisterAllocator::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position
return false;
}
+bool RegisterAllocator::PotentiallyRemoveOtherHalf(LiveInterval* interval,
+ GrowableArray<LiveInterval*>* intervals,
+ size_t index) {
+ if (interval->IsLowInterval()) {
+ DCHECK_EQ(intervals->Get(index), interval->GetHighInterval());
+ intervals->DeleteAt(index);
+ return true;
+ } else if (interval->IsHighInterval()) {
+ DCHECK_GT(index, 0u);
+ DCHECK_EQ(intervals->Get(index - 1), interval->GetLowInterval());
+ intervals->DeleteAt(index - 1);
+ return true;
+ } else {
+ return false;
+ }
+}
+
// Find the register that is used the last, and spill the interval
// that holds it. If the first use of `current` is after that register
// we spill `current` instead.
@@ -974,33 +991,17 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
if (active->GetRegister() == reg) {
DCHECK(!active->IsFixed());
LiveInterval* split = Split(active, current->GetStart());
- active_.DeleteAt(i);
if (split != active) {
handled_.Add(active);
}
+ active_.DeleteAt(i);
+ PotentiallyRemoveOtherHalf(active, &active_, i);
AddSorted(unhandled_, split);
-
- if (active->IsLowInterval() || active->IsHighInterval()) {
- LiveInterval* other_half = active->IsLowInterval()
- ? active->GetHighInterval()
- : active->GetLowInterval();
- // We also need to remove the other half from the list of actives.
- bool found = false;
- for (size_t j = 0; j < active_.Size(); ++j) {
- if (active_.Get(j) == other_half) {
- found = true;
- active_.DeleteAt(j);
- handled_.Add(other_half);
- break;
- }
- }
- DCHECK(found);
- }
break;
}
}
- for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
+ for (size_t i = 0; i < inactive_.Size(); ++i) {
LiveInterval* inactive = inactive_.Get(i);
if (inactive->GetRegister() == reg) {
if (!current->IsSplit() && !inactive->IsFixed()) {
@@ -1024,29 +1025,14 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
// If it's inactive, it must start before the current interval.
DCHECK_NE(split, inactive);
inactive_.DeleteAt(i);
+ if (PotentiallyRemoveOtherHalf(inactive, &inactive_, i) && inactive->IsHighInterval()) {
+ // We have removed an entry prior to `inactive`, so we need to decrement once more.
+ --i;
+ }
+ // Decrement because we have removed `inactive` from the list.
--i;
- --e;
handled_.Add(inactive);
AddSorted(unhandled_, split);
-
- if (inactive->IsLowInterval() || inactive->IsHighInterval()) {
- LiveInterval* other_half = inactive->IsLowInterval()
- ? inactive->GetHighInterval()
- : inactive->GetLowInterval();
-
- // We also need to remove the other half from the list of inactives.
- bool found = false;
- for (size_t j = 0; j < inactive_.Size(); ++j) {
- if (inactive_.Get(j) == other_half) {
- found = true;
- inactive_.DeleteAt(j);
- --e;
- handled_.Add(other_half);
- break;
- }
- }
- DCHECK(found);
- }
}
}
}
@@ -1695,8 +1681,6 @@ void RegisterAllocator::Resolve() {
}
// Assign temp locations.
- HInstruction* current = nullptr;
- size_t temp_index = 0;
for (size_t i = 0; i < temp_intervals_.Size(); ++i) {
LiveInterval* temp = temp_intervals_.Get(i);
if (temp->IsHighInterval()) {
@@ -1704,25 +1688,20 @@ void RegisterAllocator::Resolve() {
continue;
}
HInstruction* at = liveness_.GetTempUser(temp);
- if (at != current) {
- temp_index = 0;
- current = at;
- }
+ size_t temp_index = liveness_.GetTempIndex(temp);
LocationSummary* locations = at->GetLocations();
switch (temp->GetType()) {
case Primitive::kPrimInt:
- locations->SetTempAt(
- temp_index++, Location::RegisterLocation(temp->GetRegister()));
+ locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
break;
case Primitive::kPrimDouble:
if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
Location location = Location::FpuRegisterPairLocation(
temp->GetRegister(), temp->GetHighInterval()->GetRegister());
- locations->SetTempAt(temp_index++, location);
+ locations->SetTempAt(temp_index, location);
} else {
- locations->SetTempAt(
- temp_index++, Location::FpuRegisterLocation(temp->GetRegister()));
+ locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
}
break;
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index fcc61128a6..717be75533 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -144,6 +144,13 @@ class RegisterAllocator {
size_t first_register_use,
size_t* next_use);
+ // If `interval` has another half, remove it from the list of `intervals`.
+ // `index` holds the index at which `interval` is in `intervals`.
+ // Returns whether there is another half.
+ bool PotentiallyRemoveOtherHalf(LiveInterval* interval,
+ GrowableArray<LiveInterval*>* intervals,
+ size_t index);
+
ArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
const SsaLivenessAnalysis& liveness_;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 56ccd717cf..0f3973e5fb 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -318,6 +318,8 @@ static int RegisterOrLowRegister(Location location) {
int LiveInterval::FindFirstRegisterHint(size_t* free_until) const {
DCHECK(!IsHighInterval());
+ if (IsTemp()) return kNoRegister;
+
if (GetParent() == this && defined_by_ != nullptr) {
// This is the first interval for the instruction. Try to find
// a register based on its definition.
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index b57029d1a7..bc78dc2e76 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -180,6 +180,15 @@ class LiveInterval : public ArenaObject<kArenaAllocMisc> {
// This interval is the result of a split.
bool IsSplit() const { return parent_ != this; }
+ void AddTempUse(HInstruction* instruction, size_t temp_index) {
+ DCHECK(IsTemp());
+ DCHECK(first_use_ == nullptr) << "A temporary can only have one user";
+ size_t position = instruction->GetLifetimePosition();
+ first_use_ = new (allocator_) UsePosition(
+ instruction, temp_index, /* is_environment */ false, position, first_use_);
+ AddRange(position, position + 1);
+ }
+
void AddUse(HInstruction* instruction, size_t input_index, bool is_environment) {
// Set the use within the instruction.
size_t position = instruction->GetLifetimePosition() + 1;
@@ -856,7 +865,15 @@ class SsaLivenessAnalysis : public ValueObject {
HInstruction* GetTempUser(LiveInterval* temp) const {
// A temporary shares the same lifetime start as the instruction that requires it.
DCHECK(temp->IsTemp());
- return GetInstructionFromPosition(temp->GetStart() / 2);
+ HInstruction* user = GetInstructionFromPosition(temp->GetStart() / 2);
+ DCHECK_EQ(user, temp->GetFirstUse()->GetUser());
+ return user;
+ }
+
+ size_t GetTempIndex(LiveInterval* temp) const {
+ // We use the input index to store the index of the temporary in the user's temporary list.
+ DCHECK(temp->IsTemp());
+ return temp->GetFirstUse()->GetInputIndex();
}
size_t GetMaxLifetimePosition() const {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 5818a37a46..a73c8d77f3 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -27,6 +27,32 @@
namespace art {
+// Helper to build art::StackMapStream::LocationCatalogEntriesIndices.
+class LocationCatalogEntriesIndicesEmptyFn {
+ public:
+ void MakeEmpty(std::pair<DexRegisterLocation, size_t>& item) const {
+ item.first = DexRegisterLocation::None();
+ }
+ bool IsEmpty(const std::pair<DexRegisterLocation, size_t>& item) const {
+ return item.first == DexRegisterLocation::None();
+ }
+};
+
+// Hash function for art::StackMapStream::LocationCatalogEntriesIndices.
+// This hash function does not create collisions.
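+// (Packing the two 32-bit fields below is injective as long as value_ is
+// non-negative; a negative value_ would sign-extend into the kind_ bits.)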
+class DexRegisterLocationHashFn {
+ public:
+ size_t operator()(DexRegisterLocation key) const {
+ // Concatenate `key`'s fields to create a 64-bit value to be hashed.
+ int64_t kind_and_value =
+ (static_cast<int64_t>(key.kind_) << 32) | static_cast<int64_t>(key.value_);
+ return inner_hash_fn_(kind_and_value);
+ }
+ private:
+ std::hash<int64_t> inner_hash_fn_;
+};
+
+
/**
* Collects and builds stack maps for a method. All the stack maps
* for a method are placed in a CodeInfo object.
@@ -36,11 +62,13 @@ class StackMapStream : public ValueObject {
explicit StackMapStream(ArenaAllocator* allocator)
: allocator_(allocator),
stack_maps_(allocator, 10),
+ location_catalog_entries_(allocator, 4),
dex_register_locations_(allocator, 10 * 4),
inline_infos_(allocator, 2),
stack_mask_max_(-1),
dex_pc_max_(0),
native_pc_offset_max_(0),
+ register_mask_max_(0),
number_of_stack_maps_with_inline_info_(0),
dex_map_hash_to_stack_map_indices_(std::less<uint32_t>(), allocator->Adapter()) {}
@@ -101,6 +129,7 @@ class StackMapStream : public ValueObject {
dex_pc_max_ = std::max(dex_pc_max_, dex_pc);
native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset);
+ register_mask_max_ = std::max(register_mask_max_, register_mask);
}
void AddInlineInfoEntry(uint32_t method_index) {
@@ -111,6 +140,7 @@ class StackMapStream : public ValueObject {
size_t ComputeNeededSize() {
size_t size = CodeInfo::kFixedSize
+ + ComputeDexRegisterLocationCatalogSize()
+ ComputeStackMapsSize()
+ ComputeDexRegisterMapsSize()
+ ComputeInlineInfoSize();
@@ -128,24 +158,43 @@ class StackMapStream : public ValueObject {
ComputeInlineInfoSize(),
ComputeDexRegisterMapsSize(),
dex_pc_max_,
- native_pc_offset_max_);
+ native_pc_offset_max_,
+ register_mask_max_);
+ }
+
+  // Compute the size of the method's Dex register location catalog.
+ size_t ComputeDexRegisterLocationCatalogSize() const {
+ size_t size = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t location_catalog_entry_index = 0;
+ location_catalog_entry_index < location_catalog_entries_.Size();
+ ++location_catalog_entry_index) {
+ DexRegisterLocation dex_register_location =
+ location_catalog_entries_.Get(location_catalog_entry_index);
+ size += DexRegisterLocationCatalog::EntrySize(dex_register_location);
+ }
+ return size;
}
- // Compute the size of the Dex register map of `entry`.
size_t ComputeDexRegisterMapSize(const StackMapEntry& entry) const {
+ // Size of the map in bytes.
size_t size = DexRegisterMap::kFixedSize;
- // Add the bit mask for the dex register liveness.
- size += DexRegisterMap::LiveBitMaskSize(entry.num_dex_registers);
- for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
+ // Add the live bit mask for the Dex register liveness.
+ size += DexRegisterMap::GetLiveBitMaskSize(entry.num_dex_registers);
+ // Compute the size of the set of live Dex register entries.
+ size_t number_of_live_dex_registers = 0;
+ for (size_t dex_register_number = 0;
dex_register_number < entry.num_dex_registers;
++dex_register_number) {
if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- DexRegisterLocation dex_register_location = dex_register_locations_.Get(
- entry.dex_register_locations_start_index + index_in_dex_register_locations);
- size += DexRegisterMap::EntrySize(dex_register_location);
- index_in_dex_register_locations++;
+ ++number_of_live_dex_registers;
}
}
+ size_t map_entries_size_in_bits =
+ DexRegisterMap::SingleEntrySizeInBits(location_catalog_entries_.Size())
+ * number_of_live_dex_registers;
+ size_t map_entries_size_in_bytes =
+ RoundUp(map_entries_size_in_bits, kBitsPerByte) / kBitsPerByte;
+ size += map_entries_size_in_bytes;
return size;
}
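As a sanity check on the sizing arithmetic above: with a 2-entry catalog each mapping entry needs 1 bit, so two live registers cost one byte of mapping data, and 1016 live registers cost 127 bytes. A standalone sketch (SingleEntrySizeInBits is reimplemented here under the assumption that an index merely has to address every catalog entry; the real helper is art::DexRegisterMap::SingleEntrySizeInBits):

#include <cassert>
#include <cstddef>

// Bits needed to index `n` catalog entries (0 when one entry suffices).
size_t SingleEntrySizeInBits(size_t n) {
  size_t bits = 0;
  while ((size_t{1} << bits) < n) ++bits;
  return bits;
}

size_t MappingDataSizeInBytes(size_t catalog_entries, size_t live_registers) {
  size_t bits = SingleEntrySizeInBits(catalog_entries) * live_registers;
  return (bits + 7) / 8;  // RoundUp(bits, kBitsPerByte) / kBitsPerByte.
}

int main() {
  assert(SingleEntrySizeInBits(2) == 1);           // Test1, overflow test.
  assert(SingleEntrySizeInBits(4) == 2);           // Test2.
  assert(MappingDataSizeInBytes(2, 2) == 1);       // Test1: 1 byte.
  assert(MappingDataSizeInBytes(2, 1016) == 127);  // Overflow test, map 0.
}
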
@@ -168,8 +217,16 @@ class StackMapStream : public ValueObject {
+ (number_of_stack_maps_with_inline_info_ * InlineInfo::kFixedSize);
}
+ size_t ComputeDexRegisterLocationCatalogStart() const {
+ return CodeInfo::kFixedSize;
+ }
+
+ size_t ComputeStackMapsStart() const {
+ return ComputeDexRegisterLocationCatalogStart() + ComputeDexRegisterLocationCatalogSize();
+ }
+
size_t ComputeDexRegisterMapsStart() {
- return CodeInfo::kFixedSize + ComputeStackMapsSize();
+ return ComputeStackMapsStart() + ComputeStackMapsSize();
}
size_t ComputeInlineInfoStart() {
@@ -194,11 +251,32 @@ class StackMapStream : public ValueObject {
ComputeInlineInfoStart(),
inline_info_size);
- code_info.SetEncoding(
- inline_info_size, dex_register_map_size, dex_pc_max_, native_pc_offset_max_);
+ code_info.SetEncoding(inline_info_size,
+ dex_register_map_size,
+ dex_pc_max_,
+ native_pc_offset_max_,
+ register_mask_max_);
code_info.SetNumberOfStackMaps(stack_maps_.Size());
code_info.SetStackMaskSize(stack_mask_size);
- DCHECK_EQ(code_info.StackMapsSize(), ComputeStackMapsSize());
+ DCHECK_EQ(code_info.GetStackMapsSize(), ComputeStackMapsSize());
+
+ // Set the Dex register location catalog.
+ code_info.SetNumberOfDexRegisterLocationCatalogEntries(
+ location_catalog_entries_.Size());
+ MemoryRegion dex_register_location_catalog_region = region.Subregion(
+ ComputeDexRegisterLocationCatalogStart(),
+ ComputeDexRegisterLocationCatalogSize());
+ DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
+ // Offset in `dex_register_location_catalog` where to store the next
+ // register location.
+ size_t location_catalog_offset = DexRegisterLocationCatalog::kFixedSize;
+ for (size_t i = 0, e = location_catalog_entries_.Size(); i < e; ++i) {
+ DexRegisterLocation dex_register_location = location_catalog_entries_.Get(i);
+ dex_register_location_catalog.SetRegisterInfo(location_catalog_offset, dex_register_location);
+ location_catalog_offset += DexRegisterLocationCatalog::EntrySize(dex_register_location);
+ }
+  // Ensure we reached the end of the Dex register location catalog.
+ DCHECK_EQ(location_catalog_offset, dex_register_location_catalog_region.size());
uintptr_t next_dex_register_map_offset = 0;
uintptr_t next_inline_info_offset = 0;
@@ -234,25 +312,25 @@ class StackMapStream : public ValueObject {
stack_map.SetDexRegisterMapOffset(
code_info, register_region.start() - dex_register_locations_region.start());
- // Offset in `dex_register_map` where to store the next register entry.
- size_t offset = DexRegisterMap::kFixedSize;
- dex_register_map.SetLiveBitMask(offset,
- entry.num_dex_registers,
- *entry.live_dex_registers_mask);
- offset += DexRegisterMap::LiveBitMaskSize(entry.num_dex_registers);
+ // Set the live bit mask.
+ dex_register_map.SetLiveBitMask(entry.num_dex_registers, *entry.live_dex_registers_mask);
+
+ // Set the dex register location mapping data.
for (size_t dex_register_number = 0, index_in_dex_register_locations = 0;
dex_register_number < entry.num_dex_registers;
++dex_register_number) {
if (entry.live_dex_registers_mask->IsBitSet(dex_register_number)) {
- DexRegisterLocation dex_register_location = dex_register_locations_.Get(
- entry.dex_register_locations_start_index + index_in_dex_register_locations);
- dex_register_map.SetRegisterInfo(offset, dex_register_location);
- offset += DexRegisterMap::EntrySize(dex_register_location);
+ size_t location_catalog_entry_index =
+ dex_register_locations_.Get(entry.dex_register_locations_start_index
+ + index_in_dex_register_locations);
+ dex_register_map.SetLocationCatalogEntryIndex(
+ index_in_dex_register_locations,
+ location_catalog_entry_index,
+ entry.num_dex_registers,
+ location_catalog_entries_.Size());
++index_in_dex_register_locations;
}
}
- // Ensure we reached the end of the Dex registers region.
- DCHECK_EQ(offset, register_region.size());
}
}
@@ -282,12 +360,31 @@ class StackMapStream : public ValueObject {
}
void AddDexRegisterEntry(uint16_t dex_register, DexRegisterLocation::Kind kind, int32_t value) {
+ StackMapEntry entry = stack_maps_.Get(stack_maps_.Size() - 1);
+ DCHECK_LT(dex_register, entry.num_dex_registers);
+
if (kind != DexRegisterLocation::Kind::kNone) {
// Ensure we only use non-compressed location kind at this stage.
DCHECK(DexRegisterLocation::IsShortLocationKind(kind))
<< DexRegisterLocation::PrettyDescriptor(kind);
- dex_register_locations_.Add(DexRegisterLocation(kind, value));
- StackMapEntry entry = stack_maps_.Get(stack_maps_.Size() - 1);
+ DexRegisterLocation location(kind, value);
+
+ // Look for Dex register `location` in the location catalog (using the
+ // companion hash map of locations to indices). Use its index if it
+ // is already in the location catalog. If not, insert it (in the
+ // location catalog and the hash map) and use the newly created index.
+ auto it = location_catalog_entries_indices_.Find(location);
+ if (it != location_catalog_entries_indices_.end()) {
+ // Retrieve the index from the hash map.
+ dex_register_locations_.Add(it->second);
+ } else {
+ // Create a new entry in the location catalog and the hash map.
+ size_t index = location_catalog_entries_.Size();
+ location_catalog_entries_.Add(location);
+ dex_register_locations_.Add(index);
+ location_catalog_entries_indices_.Insert(std::make_pair(location, index));
+ }
+
entry.live_dex_registers_mask->SetBit(dex_register);
entry.dex_register_map_hash += (1 << dex_register);
entry.dex_register_map_hash += static_cast<uint32_t>(value);
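The look-up-or-insert dance above is a textbook intern table. A minimal standalone equivalent, with std::unordered_map standing in for art::HashMap (which needs the EmptyFn/hash helpers defined at the top of this file) and a packed 64-bit integer standing in for DexRegisterLocation:

#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>

// Stand-in for DexRegisterLocation: (kind, value) packed into 64 bits.
using Location = int64_t;

std::vector<Location> catalog;                 // location_catalog_entries_
std::unordered_map<Location, size_t> indices;  // ..._entries_indices_
std::vector<size_t> dex_register_locations;    // Per-stack-map indices.

void AddDexRegisterEntry(Location location) {
  auto it = indices.find(location);
  if (it != indices.end()) {
    dex_register_locations.push_back(it->second);  // Reuse catalog index.
  } else {
    size_t index = catalog.size();                 // New catalog entry.
    catalog.push_back(location);
    indices.emplace(location, index);
    dex_register_locations.push_back(index);
  }
}

int main() {
  AddDexRegisterEntry(42);
  AddDexRegisterEntry(7);
  AddDexRegisterEntry(42);  // Deduplicated: reuses index 0.
  assert(catalog.size() == 2);
  assert((dex_register_locations == std::vector<size_t>{0, 1, 0}));
}
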
@@ -354,9 +451,9 @@ class StackMapStream : public ValueObject {
return false;
}
if (a.live_dex_registers_mask->IsBitSet(i)) {
- DexRegisterLocation a_loc = dex_register_locations_.Get(
+ size_t a_loc = dex_register_locations_.Get(
a.dex_register_locations_start_index + index_in_dex_register_locations);
- DexRegisterLocation b_loc = dex_register_locations_.Get(
+ size_t b_loc = dex_register_locations_.Get(
b.dex_register_locations_start_index + index_in_dex_register_locations);
if (a_loc != b_loc) {
return false;
@@ -369,21 +466,29 @@ class StackMapStream : public ValueObject {
ArenaAllocator* allocator_;
GrowableArray<StackMapEntry> stack_maps_;
- GrowableArray<DexRegisterLocation> dex_register_locations_;
+
+ // A catalog of unique [location_kind, register_value] pairs (per method).
+ GrowableArray<DexRegisterLocation> location_catalog_entries_;
+ // Map from Dex register location catalog entries to their indices in the
+ // location catalog.
+ typedef HashMap<DexRegisterLocation, size_t, LocationCatalogEntriesIndicesEmptyFn,
+ DexRegisterLocationHashFn> LocationCatalogEntriesIndices;
+ LocationCatalogEntriesIndices location_catalog_entries_indices_;
+
+  // Concatenated sequences of indices into `location_catalog_entries_`,
+  // one sequence per Dex register map.
+ GrowableArray<size_t> dex_register_locations_;
GrowableArray<InlineInfoEntry> inline_infos_;
int stack_mask_max_;
uint32_t dex_pc_max_;
uint32_t native_pc_offset_max_;
+ uint32_t register_mask_max_;
size_t number_of_stack_maps_with_inline_info_;
ArenaSafeMap<uint32_t, GrowableArray<uint32_t>> dex_map_hash_to_stack_map_indices_;
static constexpr uint32_t kNoSameDexMapFound = -1;
- ART_FRIEND_TEST(StackMapTest, Test1);
- ART_FRIEND_TEST(StackMapTest, Test2);
- ART_FRIEND_TEST(StackMapTest, TestNonLiveDexRegisters);
-
DISALLOW_COPY_AND_ASSIGN(StackMapStream);
};
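For reference, the Compute*Start() helpers in this class now chain into the following CodeInfo layout: fixed header, then the location catalog, the stack maps, the Dex register maps, and the inline infos. A standalone sketch of that offset chain (the sizes are placeholders):

#include <cassert>
#include <cstddef>

// Placeholder sizes standing in for the Compute*Size() results.
struct Sizes {
  size_t code_info_fixed;
  size_t location_catalog;
  size_t stack_maps;
  size_t dex_register_maps;
};

size_t LocationCatalogStart(const Sizes& s) { return s.code_info_fixed; }
size_t StackMapsStart(const Sizes& s) {
  return LocationCatalogStart(s) + s.location_catalog;
}
size_t DexRegisterMapsStart(const Sizes& s) {
  return StackMapsStart(s) + s.stack_maps;
}
size_t InlineInfoStart(const Sizes& s) {
  return DexRegisterMapsStart(s) + s.dex_register_maps;
}

int main() {
  Sizes s{16, 6, 40, 10};            // Arbitrary example sizes.
  assert(InlineInfoStart(s) == 72);  // 16 + 6 + 40 + 10.
}
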
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index e5a9790254..8d160bc81e 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -31,6 +31,8 @@ static bool SameBits(MemoryRegion region, const BitVector& bit_vector) {
return true;
}
+using Kind = DexRegisterLocation::Kind;
+
TEST(StackMapTest, Test1) {
ArenaPool pool;
ArenaAllocator arena(&pool);
@@ -39,8 +41,8 @@ TEST(StackMapTest, Test1) {
ArenaBitVector sp_mask(&arena, 0, false);
size_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInStack, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
+  stream.AddDexRegisterEntry(1, Kind::kConstant, -2);  // Large location.
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -51,6 +53,16 @@ TEST(StackMapTest, Test1) {
ASSERT_EQ(0u, code_info.GetStackMaskSize());
ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(2u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ // The Dex register location catalog contains:
+ // - one 1-byte short Dex register location, and
+ // - one 5-byte large Dex register location.
+ size_t expected_location_catalog_size = 1u + 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
StackMap stack_map = code_info.GetStackMapAt(0);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
@@ -62,14 +74,40 @@ TEST(StackMapTest, Test1) {
ASSERT_TRUE(SameBits(stack_mask, sp_mask));
ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
- DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(7u, dex_registers.Size());
- DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
- DexRegisterLocation location1 = dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstantLargeValue, location1.GetInternalKind());
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+  // - one 1-byte set of location catalog entry indices composed of two 1-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstant,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(0u, index0);
+ ASSERT_EQ(1u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
@@ -86,8 +124,8 @@ TEST(StackMapTest, Test2) {
sp_mask1.SetBit(4);
size_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInStack, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kInStack, 0); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
stream.AddInlineInfoEntry(42);
stream.AddInlineInfoEntry(82);
@@ -95,8 +133,8 @@ TEST(StackMapTest, Test2) {
sp_mask2.SetBit(3);
sp_mask1.SetBit(8);
stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInRegister, 18);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kInFpuRegister, 3);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 18); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kInFpuRegister, 3); // Short location.
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -107,6 +145,16 @@ TEST(StackMapTest, Test2) {
ASSERT_EQ(1u, code_info.GetStackMaskSize());
ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(4u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ // The Dex register location catalog contains:
+ // - three 1-byte short Dex register locations, and
+ // - one 5-byte large Dex register location.
+ size_t expected_location_catalog_size = 3u * 1u + 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
// First stack map.
{
StackMap stack_map = code_info.GetStackMapAt(0);
@@ -120,17 +168,40 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
- DexRegisterMap dex_registers =
+ DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(7u, dex_registers.Size());
- DexRegisterLocation location0 =
- dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
- DexRegisterLocation location1 =
- dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstant, location1.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInStack, location0.GetInternalKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstant,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInStack,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(0, dex_register_map.GetStackOffsetInBytes(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(0u, index0);
+ ASSERT_EQ(1u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInStack, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kInStack, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
@@ -154,17 +225,40 @@ TEST(StackMapTest, Test2) {
ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
- DexRegisterMap dex_registers =
+ DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
- ASSERT_EQ(3u, dex_registers.Size());
- DexRegisterLocation location0 =
- dex_registers.GetLocationKindAndValue(0, number_of_dex_registers);
- DexRegisterLocation location1 =
- dex_registers.GetLocationKindAndValue(1, number_of_dex_registers);
- ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInFpuRegister, location1.GetKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInRegister, location0.GetInternalKind());
- ASSERT_EQ(DexRegisterLocation::Kind::kInFpuRegister, location1.GetInternalKind());
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(2u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask, and
+ // - one 1-byte set of location catalog entry indices composed of two 2-bit values.
+ size_t expected_dex_register_map_size = 1u + 1u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kInRegister,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInFpuRegister,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInRegister,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kInFpuRegister,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(18, dex_register_map.GetMachineRegister(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(3, dex_register_map.GetMachineRegister(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(2u, index0);
+ ASSERT_EQ(3u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kInRegister, location0.GetKind());
+ ASSERT_EQ(Kind::kInFpuRegister, location1.GetKind());
+ ASSERT_EQ(Kind::kInRegister, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kInFpuRegister, location1.GetInternalKind());
ASSERT_EQ(18, location0.GetValue());
ASSERT_EQ(3, location1.GetValue());
@@ -180,8 +274,8 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
ArenaBitVector sp_mask(&arena, 0, false);
uint32_t number_of_dex_registers = 2;
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kNone, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kNone, 0); // No location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -189,14 +283,62 @@ TEST(StackMapTest, TestNonLiveDexRegisters) {
stream.FillIn(region);
CodeInfo code_info(region);
+ ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(1u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ // The Dex register location catalog contains:
+ // - one 5-byte large Dex register location.
+ size_t expected_location_catalog_size = 5u;
+ ASSERT_EQ(expected_location_catalog_size, location_catalog.Size());
+
StackMap stack_map = code_info.GetStackMapAt(0);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
+
ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
- DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
- ASSERT_EQ(DexRegisterLocation::Kind::kNone,
- dex_registers.GetLocationKind(0, number_of_dex_registers));
- ASSERT_EQ(DexRegisterLocation::Kind::kConstant,
- dex_registers.GetLocationKind(1, number_of_dex_registers));
- ASSERT_EQ(-2, dex_registers.GetConstant(1, number_of_dex_registers));
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
+ ASSERT_TRUE(dex_register_map.IsDexRegisterLive(1));
+ ASSERT_EQ(1u, dex_register_map.GetNumberOfLiveDexRegisters(number_of_dex_registers));
+ // The Dex register map contains:
+ // - one 1-byte live bit mask.
+  // No space is allocated for the location catalog entry indices: with a
+  // single-entry catalog each index is 0 bits wide (see
+  // art::DexRegisterMap::SingleEntrySizeInBits).
+ size_t expected_dex_register_map_size = 1u + 0u;
+ ASSERT_EQ(expected_dex_register_map_size, dex_register_map.Size());
+
+ ASSERT_EQ(Kind::kNone,
+ dex_register_map.GetLocationKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstant,
+ dex_register_map.GetLocationKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kNone,
+ dex_register_map.GetLocationInternalKind(0, number_of_dex_registers, code_info));
+ ASSERT_EQ(Kind::kConstantLargeValue,
+ dex_register_map.GetLocationInternalKind(1, number_of_dex_registers, code_info));
+ ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info));
+
+ size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
+ 0, number_of_dex_registers, number_of_location_catalog_entries);
+ size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
+ 1, number_of_dex_registers, number_of_location_catalog_entries);
+ ASSERT_EQ(DexRegisterLocationCatalog::kNoLocationEntryIndex, index0);
+ ASSERT_EQ(0u, index1);
+ DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
+ DexRegisterLocation location1 = location_catalog.GetDexRegisterLocation(index1);
+ ASSERT_EQ(Kind::kNone, location0.GetKind());
+ ASSERT_EQ(Kind::kConstant, location1.GetKind());
+ ASSERT_EQ(Kind::kNone, location0.GetInternalKind());
+ ASSERT_EQ(Kind::kConstantLargeValue, location1.GetInternalKind());
+ ASSERT_EQ(0, location0.GetValue());
+ ASSERT_EQ(-2, location1.GetValue());
+
ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
}
@@ -209,14 +351,21 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
StackMapStream stream(&arena);
ArenaBitVector sp_mask(&arena, 0, false);
- uint32_t number_of_dex_registers = 0xEA;
+ uint32_t number_of_dex_registers = 1024;
+ // Create the first stack map (and its Dex register map).
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- for (uint32_t i = 0; i < number_of_dex_registers - 9; ++i) {
- stream.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0);
+ uint32_t number_of_dex_live_registers_in_dex_register_map_0 = number_of_dex_registers - 8;
+ for (uint32_t i = 0; i < number_of_dex_live_registers_in_dex_register_map_0; ++i) {
+ // Use two different Dex register locations to populate this map,
+ // as using a single value (in the whole CodeInfo object) would
+ // make this Dex register mapping data empty (see
+ // art::DexRegisterMap::SingleEntrySizeInBits).
+ stream.AddDexRegisterEntry(i, Kind::kConstant, i % 2); // Short location.
}
+ // Create the second stack map (and its Dex register map).
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
for (uint32_t i = 0; i < number_of_dex_registers; ++i) {
- stream.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0);
+ stream.AddDexRegisterEntry(i, Kind::kConstant, 0); // Short location.
}
size_t size = stream.ComputeNeededSize();
@@ -225,10 +374,35 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) {
stream.FillIn(region);
CodeInfo code_info(region);
- StackMap stack_map = code_info.GetStackMapAt(1);
- ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info));
- ASSERT_NE(stack_map.GetDexRegisterMapOffset(code_info), StackMap::kNoDexRegisterMap);
- ASSERT_EQ(stack_map.GetDexRegisterMapOffset(code_info), StackMap::kNoDexRegisterMapSmallEncoding);
+ // The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
+ // and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
+ // has a size of 1 bit.
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(2u, number_of_location_catalog_entries);
+ ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_location_catalog_entries));
+
+ // The first Dex register map contains:
+ // - a live register bit mask for 1024 registers (that is, 128 bytes of
+ // data); and
+  // - location mapping data for its 1016 live Dex registers, at 1 bit per
+  //   entry (that is, 127 bytes of data).
+ // Hence it has a size of 255 bytes, and therefore...
+ ASSERT_EQ(128u, DexRegisterMap::GetLiveBitMaskSize(number_of_dex_registers));
+ StackMap stack_map0 = code_info.GetStackMapAt(0);
+ DexRegisterMap dex_register_map0 =
+ code_info.GetDexRegisterMapOf(stack_map0, number_of_dex_registers);
+ ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_dex_registers,
+ number_of_location_catalog_entries));
+ ASSERT_EQ(255u, dex_register_map0.Size());
+
+ StackMap stack_map1 = code_info.GetStackMapAt(1);
+ ASSERT_TRUE(stack_map1.HasDexRegisterMap(code_info));
+ // ...the offset of the second Dex register map (relative to the
+ // beginning of the Dex register maps region) is 255 (i.e.,
+ // kNoDexRegisterMapSmallEncoding).
+ ASSERT_NE(stack_map1.GetDexRegisterMapOffset(code_info), StackMap::kNoDexRegisterMap);
+ ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(code_info), 0xFFu);
}
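The numbers this test relies on line up as follows (a standalone check of the arithmetic; it assumes DexRegisterMap::kFixedSize is zero, which the asserted 255-byte total implies):

#include <cassert>
#include <cstddef>

int main() {
  const size_t number_of_dex_registers = 1024;
  const size_t live_registers = number_of_dex_registers - 8;       // 1016
  const size_t live_bit_mask_bytes = number_of_dex_registers / 8;  // 128
  // Two catalog entries -> 1-bit indices (asserted by the test above).
  const size_t mapping_data_bytes = (live_registers * 1 + 7) / 8;  // 127
  const size_t map0_size = live_bit_mask_bytes + mapping_data_bytes;
  assert(map0_size == 255);  // The second map's offset is thus exactly 0xFF
                             // (kNoDexRegisterMapSmallEncoding), the case
                             // this test exercises.
}
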
TEST(StackMapTest, TestShareDexRegisterMap) {
@@ -240,16 +414,16 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
uint32_t number_of_dex_registers = 2;
// First stack map.
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInRegister, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
// Second stack map, which should share the same dex register map.
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInRegister, 0);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 0); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
// Third stack map (doesn't share the dex register map).
stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
- stream.AddDexRegisterEntry(0, DexRegisterLocation::Kind::kInRegister, 2);
- stream.AddDexRegisterEntry(1, DexRegisterLocation::Kind::kConstant, -2);
+ stream.AddDexRegisterEntry(0, Kind::kInRegister, 2); // Short location.
+ stream.AddDexRegisterEntry(1, Kind::kConstant, -2); // Large location.
size_t size = stream.ComputeNeededSize();
void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -260,20 +434,20 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
// Verify first stack map.
StackMap sm0 = ci.GetStackMapAt(0);
DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm0, number_of_dex_registers);
- ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers));
- ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers));
+ ASSERT_EQ(0, dex_registers0.GetMachineRegister(0, number_of_dex_registers, ci));
+ ASSERT_EQ(-2, dex_registers0.GetConstant(1, number_of_dex_registers, ci));
// Verify second stack map.
StackMap sm1 = ci.GetStackMapAt(1);
DexRegisterMap dex_registers1 = ci.GetDexRegisterMapOf(sm1, number_of_dex_registers);
- ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers));
- ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers));
+ ASSERT_EQ(0, dex_registers1.GetMachineRegister(0, number_of_dex_registers, ci));
+ ASSERT_EQ(-2, dex_registers1.GetConstant(1, number_of_dex_registers, ci));
// Verify third stack map.
StackMap sm2 = ci.GetStackMapAt(2);
DexRegisterMap dex_registers2 = ci.GetDexRegisterMapOf(sm2, number_of_dex_registers);
- ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers));
- ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers));
+ ASSERT_EQ(2, dex_registers2.GetMachineRegister(0, number_of_dex_registers, ci));
+ ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci));
// Verify dex register map offsets.
ASSERT_EQ(sm0.GetDexRegisterMapOffset(ci), sm1.GetDexRegisterMapOffset(ci));
@@ -281,4 +455,39 @@ TEST(StackMapTest, TestShareDexRegisterMap) {
ASSERT_NE(sm1.GetDexRegisterMapOffset(ci), sm2.GetDexRegisterMapOffset(ci));
}
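The sharing that this test verifies is driven by dex_register_map_hash and dex_map_hash_to_stack_map_indices_ above: stack maps are bucketed by a cheap content hash, and a full comparison (HaveTheSameDexMaps) decides sharing within a bucket. A minimal standalone sketch of that scheme with illustrative stand-in types:

#include <cassert>
#include <cstdint>
#include <map>
#include <vector>

// Stand-in: a Dex register map is just its sequence of catalog indices.
using DexMap = std::vector<size_t>;

std::vector<DexMap> maps;  // One per stack map.
std::map<uint32_t, std::vector<size_t>> hash_to_indices;

uint32_t Hash(const DexMap& m) {
  uint32_t h = 0;
  for (size_t index : m) h += static_cast<uint32_t>(index);  // Cheap hash.
  return h;
}

// Returns the index of an earlier identical map, or the new map's index.
size_t AddMap(const DexMap& m) {
  uint32_t h = Hash(m);
  for (size_t i : hash_to_indices[h]) {
    if (maps[i] == m) return i;  // Share the earlier map.
  }
  maps.push_back(m);
  hash_to_indices[h].push_back(maps.size() - 1);
  return maps.size() - 1;
}

int main() {
  size_t a = AddMap({0, 1});
  size_t b = AddMap({0, 1});  // Identical: shared with `a`.
  size_t c = AddMap({2, 1});  // Different: gets its own map.
  assert(a == b && a != c);
}
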
+TEST(StackMapTest, TestNoDexRegisterMap) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ StackMapStream stream(&arena);
+
+ ArenaBitVector sp_mask(&arena, 0, false);
+ uint32_t number_of_dex_registers = 0;
+ stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
+
+ size_t size = stream.ComputeNeededSize();
+ void* memory = arena.Alloc(size, kArenaAllocMisc);
+ MemoryRegion region(memory, size);
+ stream.FillIn(region);
+
+ CodeInfo code_info(region);
+ ASSERT_EQ(0u, code_info.GetStackMaskSize());
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+
+ uint32_t number_of_location_catalog_entries =
+ code_info.GetNumberOfDexRegisterLocationCatalogEntries();
+ ASSERT_EQ(0u, number_of_location_catalog_entries);
+ DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog();
+ ASSERT_EQ(0u, location_catalog.Size());
+
+ StackMap stack_map = code_info.GetStackMapAt(0);
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
+ ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
+ ASSERT_EQ(0u, stack_map.GetDexPc(code_info));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info));
+
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(code_info));
+ ASSERT_FALSE(stack_map.HasInlineInfo(code_info));
+}
+
} // namespace art