author     Andreas Gampe <agampe@google.com>  2015-01-14 15:45:59 -0800
committer  Andreas Gampe <agampe@google.com>  2015-01-15 11:32:48 -0800
commit     57b34294758e9c00993913ebe43c7ee4698a5cc6 (patch)
tree       981821619027686f83fbe00445299b0522f1df05 /compiler
parent     4945bfef00ac446d9c5458e55500229d463ab4c3 (diff)
ART: Allow compiling interpret-only mips64 files

Include enough infrastructure to allow cross-compiling for mips64, interpret-only: the instruction-set features, frame size info, and the utils assembler. Also add a disassembler for oatdump, and support in patchoat.

Note: the runtime cannot run mips64 yet.

Change-Id: Id106581fa76b478984741c62a8a03be0f370d992

Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                              |    2
-rw-r--r--  compiler/compiled_method.cc                      |    2
-rw-r--r--  compiler/driver/compiler_driver.cc               |    3
-rw-r--r--  compiler/elf_builder.h                           |    8
-rw-r--r--  compiler/trampolines/trampoline_compiler.cc      |   32
-rw-r--r--  compiler/utils/assembler.cc                      |    3
-rw-r--r--  compiler/utils/assembler.h                       |    4
-rw-r--r--  compiler/utils/managed_register.h                |    4
-rw-r--r--  compiler/utils/mips64/assembler_mips64.cc        | 1036
-rw-r--r--  compiler/utils/mips64/assembler_mips64.h         |  294
-rw-r--r--  compiler/utils/mips64/constants_mips64.h         |   86
-rw-r--r--  compiler/utils/mips64/managed_register_mips64.cc |   50
-rw-r--r--  compiler/utils/mips64/managed_register_mips64.h  |  121
13 files changed, 1644 insertions(+), 1 deletion(-)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index db338f0538..ad9b266456 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -122,6 +122,8 @@ LIBART_COMPILER_SRC_FILES := \
utils/dwarf_cfi.cc \
utils/mips/assembler_mips.cc \
utils/mips/managed_register_mips.cc \
+ utils/mips64/assembler_mips64.cc \
+ utils/mips64/managed_register_mips64.cc \
utils/x86/assembler_x86.cc \
utils/x86/managed_register_x86.cc \
utils/x86_64/assembler_x86_64.cc \
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 234e8b96f6..22be28c4d9 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -63,6 +63,7 @@ size_t CompiledCode::CodeDelta(InstructionSet instruction_set) {
case kArm:
case kArm64:
case kMips:
+ case kMips64:
case kX86:
case kX86_64:
return 0;
@@ -82,6 +83,7 @@ const void* CompiledCode::CodePointer(const void* code_pointer,
case kArm:
case kArm64:
case kMips:
+ case kMips64:
case kX86:
case kX86_64:
return code_pointer;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 9985d66469..968c77ef9a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -363,7 +363,7 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
dump_passes_(dump_passes),
timings_logger_(timer),
compiler_context_(nullptr),
- support_boot_image_fixup_(instruction_set != kMips),
+ support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
dedupe_code_("dedupe code", *swap_space_allocator_),
dedupe_src_mapping_table_("dedupe source mapping table", *swap_space_allocator_),
dedupe_mapping_table_("dedupe mapping table", *swap_space_allocator_),
@@ -2094,6 +2094,7 @@ static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
case kArm64:
case kThumb2:
case kMips:
+ case kMips64:
case kX86:
case kX86_64: return true;
default: return false;
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 273b62deee..94268de077 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -1108,6 +1108,14 @@ class ElfBuilder FINAL {
EF_MIPS_ARCH_32R2);
break;
}
+ case kMips64: {
+ elf_header_.e_machine = EM_MIPS;
+ elf_header_.e_flags = (EF_MIPS_NOREORDER |
+ EF_MIPS_PIC |
+ EF_MIPS_CPIC |
+ EF_MIPS_ARCH_64R6);
+ break;
+ }
default: {
fatal_error_ = true;
LOG(FATAL) << "Unknown instruction set: " << isa;
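
The kMips64 case above composes e_flags the same way as the 32-bit kMips case, swapping in the 64R6 architecture flag. As a standalone sketch of the resulting word (the EF_MIPS_* values below are the standard elf.h constants, quoted from memory rather than from this patch, so verify against your toolchain's headers):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed standard ELF values; not taken from this patch.
      const uint32_t EF_MIPS_NOREORDER = 0x00000001;
      const uint32_t EF_MIPS_PIC       = 0x00000002;
      const uint32_t EF_MIPS_CPIC      = 0x00000004;
      const uint32_t EF_MIPS_ARCH_64R6 = 0xa0000000;
      std::printf("e_flags = 0x%08x\n",
                  EF_MIPS_NOREORDER | EF_MIPS_PIC | EF_MIPS_CPIC | EF_MIPS_ARCH_64R6);
      return 0;  // prints e_flags = 0xa0000007
    }
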
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 385d1340fc..cb51ed8fc8 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -20,6 +20,7 @@
#include "utils/arm/assembler_arm.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/mips/assembler_mips.h"
+#include "utils/mips64/assembler_mips64.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86_64/assembler_x86_64.h"
@@ -120,6 +121,35 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
}
} // namespace mips
+namespace mips64 {
+static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
+ ThreadOffset<8> offset) {
+ std::unique_ptr<Mips64Assembler> assembler(static_cast<Mips64Assembler*>(Assembler::Create(kMips64)));
+
+ switch (abi) {
+ case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
+ __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
+ break;
+ case kJniAbi: // Load via Thread* held in JNIEnv* in first argument (A0).
+ __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
+ __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
+ break;
+ case kQuickAbi: // Fall-through.
+ __ LoadFromOffset(kLoadDoubleword, T9, S1, offset.Int32Value());
+ }
+ __ Jr(T9);
+ __ Nop();
+ __ Break();
+
+ size_t cs = assembler->CodeSize();
+ std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ assembler->FinalizeInstructions(code);
+
+ return entry_stub.release();
+}
+} // namespace mips64
+
namespace x86 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) {
std::unique_ptr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
@@ -160,6 +190,8 @@ const std::vector<uint8_t>* CreateTrampoline64(InstructionSet isa, EntryPointCal
switch (isa) {
case kArm64:
return arm64::CreateTrampoline(abi, offset);
+ case kMips64:
+ return mips64::CreateTrampoline(abi, offset);
case kX86_64:
return x86_64::CreateTrampoline(offset);
default:
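
The mips64 branch added above mirrors the 32-bit mips version: load the entrypoint address into T9 (which the MIPS PIC ABI expects to hold the callee address), jump, and pad with a breakpoint. A hedged sketch of a call site — the offset argument is a placeholder for illustration, not a value from this patch:

    // Build an interpreter-ABI trampoline for mips64; the caller owns the result.
    const std::vector<uint8_t>* stub =
        CreateTrampoline64(kMips64, kInterpreterAbi,
                           ThreadOffset<8>(0 /* placeholder entrypoint offset */));
    // ... emit *stub into the oat file, then:
    delete stub;
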
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 68345129c3..5340dd3a25 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -23,6 +23,7 @@
#include "arm/assembler_thumb2.h"
#include "arm64/assembler_arm64.h"
#include "mips/assembler_mips.h"
+#include "mips64/assembler_mips64.h"
#include "x86/assembler_x86.h"
#include "x86_64/assembler_x86_64.h"
#include "globals.h"
@@ -115,6 +116,8 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
return new arm64::Arm64Assembler();
case kMips:
return new mips::MipsAssembler();
+ case kMips64:
+ return new mips64::Mips64Assembler();
case kX86:
return new x86::X86Assembler();
case kX86_64:
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 134dda4b2c..923ecdbd9d 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -47,6 +47,9 @@ namespace arm64 {
namespace mips {
class MipsAssembler;
}
+namespace mips64 {
+ class Mips64Assembler;
+}
namespace x86 {
class X86Assembler;
}
@@ -120,6 +123,7 @@ class Label {
friend class arm::Thumb2Assembler;
friend class arm64::Arm64Assembler;
friend class mips::MipsAssembler;
+ friend class mips64::Mips64Assembler;
friend class x86::X86Assembler;
friend class x86_64::X86_64Assembler;
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index bfb2829a32..bb62bca3b9 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -30,6 +30,9 @@ class Arm64ManagedRegister;
namespace mips {
class MipsManagedRegister;
}
+namespace mips64 {
+class Mips64ManagedRegister;
+}
namespace x86 {
class X86ManagedRegister;
@@ -54,6 +57,7 @@ class ManagedRegister {
arm::ArmManagedRegister AsArm() const;
arm64::Arm64ManagedRegister AsArm64() const;
mips::MipsManagedRegister AsMips() const;
+ mips64::Mips64ManagedRegister AsMips64() const;
x86::X86ManagedRegister AsX86() const;
x86_64::X86_64ManagedRegister AsX86_64() const;
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
new file mode 100644
index 0000000000..233ae7db3c
--- /dev/null
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_mips64.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace mips64 {
+
+void Mips64Assembler::Emit(int32_t value) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<int32_t>(value);
+}
+
+void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
+ int shamt, int funct) {
+ CHECK_NE(rs, kNoGpuRegister);
+ CHECK_NE(rt, kNoGpuRegister);
+ CHECK_NE(rd, kNoGpuRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ static_cast<int32_t>(rs) << kRsShift |
+ static_cast<int32_t>(rt) << kRtShift |
+ static_cast<int32_t>(rd) << kRdShift |
+ shamt << kShamtShift |
+ funct;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
+ CHECK_NE(rs, kNoGpuRegister);
+ CHECK_NE(rt, kNoGpuRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ static_cast<int32_t>(rs) << kRsShift |
+ static_cast<int32_t>(rt) << kRtShift |
+ imm;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitJ(int opcode, int address) {
+ int32_t encoding = opcode << kOpcodeShift |
+ address;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
+                             int funct) {
+ CHECK_NE(ft, kNoFpuRegister);
+ CHECK_NE(fs, kNoFpuRegister);
+ CHECK_NE(fd, kNoFpuRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<int32_t>(ft) << kFtShift |
+ static_cast<int32_t>(fs) << kFsShift |
+ static_cast<int32_t>(fd) << kFdShift |
+ funct;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm) {
+ CHECK_NE(rt, kNoFpuRegister);
+ int32_t encoding = opcode << kOpcodeShift |
+ fmt << kFmtShift |
+ static_cast<int32_t>(rt) << kRtShift |
+ imm;
+ Emit(encoding);
+}
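
As a worked example of the field packing above (a standalone sketch using the shift constants declared in constants_mips64.h; the expected word is hand-computed, so treat it as illustrative):

    #include <cassert>
    #include <cstdint>

    // Shift constants as declared in constants_mips64.h.
    constexpr int kOpcodeShift = 26, kRsShift = 21, kRtShift = 16;

    constexpr uint32_t EncodeI(int opcode, int rs, int rt, uint16_t imm) {
      return (opcode << kOpcodeShift) | (rs << kRsShift) | (rt << kRtShift) | imm;
    }

    int main() {
      // ori a0, zero, 0x1234: opcode 0xd, rs = 0 (zero), rt = 4 (a0).
      assert(EncodeI(0xd, 0, 4, 0x1234) == 0x34041234u);
      return 0;
    }
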
+
+void Mips64Assembler::EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal) {
+ int offset;
+ if (label->IsBound()) {
+ offset = label->Position() - buffer_.Size();
+ } else {
+ // Use the offset field of the branch instruction for linking the sites.
+ offset = label->position_;
+ label->LinkTo(buffer_.Size());
+ }
+ if (equal) {
+ Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
+ } else {
+ Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
+ }
+}
+
+void Mips64Assembler::EmitJump(Label* label, bool link) {
+ int offset;
+ if (label->IsBound()) {
+ offset = label->Position() - buffer_.Size();
+ } else {
+ // Use the offset field of the jump instruction for linking the sites.
+ offset = label->position_;
+ label->LinkTo(buffer_.Size());
+ }
+ if (link) {
+ Jal((offset >> 2) & kJumpOffsetMask);
+ } else {
+ J((offset >> 2) & kJumpOffsetMask);
+ }
+}
+
+int32_t Mips64Assembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
+ CHECK_ALIGNED(offset, 4);
+ CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;
+
+ // Properly preserve only the bits supported in the instruction.
+ offset >>= 2;
+ if (is_jump) {
+ offset &= kJumpOffsetMask;
+ return (inst & ~kJumpOffsetMask) | offset;
+ } else {
+ offset &= kBranchOffsetMask;
+ return (inst & ~kBranchOffsetMask) | offset;
+ }
+}
+
+int Mips64Assembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
+ // Sign-extend, then left-shift by 2.
+ if (is_jump) {
+ return (((inst & kJumpOffsetMask) << 6) >> 4);
+ } else {
+ return (((inst & kBranchOffsetMask) << 16) >> 14);
+ }
+}
+
+void Mips64Assembler::Bind(Label* label, bool is_jump) {
+ CHECK(!label->IsBound());
+ int bound_pc = buffer_.Size();
+ while (label->IsLinked()) {
+ int32_t position = label->Position();
+ int32_t next = buffer_.Load<int32_t>(position);
+ int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
+ int32_t encoded = Mips64Assembler::EncodeBranchOffset(offset, next, is_jump);
+ buffer_.Store<int32_t>(position, encoded);
+ label->position_ = Mips64Assembler::DecodeBranchOffset(next, is_jump);
+ }
+ label->BindTo(bound_pc);
+}
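
EmitBranch and Bind above implement the classic back-patching scheme: while a label is unbound, each branch to it stores the position of the previous branch site in its own offset field, so the sites form a linked list threaded through the code buffer; Bind then walks that list and rewrites each site with the real offset. A minimal sketch of the idea, simplified to raw ints rather than the actual buffer encoding:

    #include <cstdint>
    #include <vector>

    int main() {
      std::vector<int32_t> offset_field;    // one slot per branch site
      int32_t label_pos = -1;               // label position while unbound
      for (int site = 0; site < 3; ++site) {
        offset_field.push_back(label_pos);  // link to the previous site
        label_pos = site;                   // label now heads the chain
      }
      const int32_t target = 100;           // Bind(): patch every linked site
      while (label_pos != -1) {
        int32_t next = offset_field[label_pos];
        offset_field[label_pos] = target - label_pos;  // real branch offset
        label_pos = next;
      }
      return 0;
    }
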
+
+void Mips64Assembler::Add(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x20);
+}
+
+void Mips64Assembler::Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x8, rs, rt, imm16);
+}
+
+void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x21);
+}
+
+void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x9, rs, rt, imm16);
+}
+
+void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x19, rs, rt, imm16);
+}
+
+void Mips64Assembler::Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x22);
+}
+
+void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x23);
+}
+
+void Mips64Assembler::Mult(GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x18);
+}
+
+void Mips64Assembler::Multu(GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x19);
+}
+
+void Mips64Assembler::Div(GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1a);
+}
+
+void Mips64Assembler::Divu(GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1b);
+}
+
+void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x24);
+}
+
+void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0xc, rs, rt, imm16);
+}
+
+void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x25);
+}
+
+void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0xd, rs, rt, imm16);
+}
+
+void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x26);
+}
+
+void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0xe, rs, rt, imm16);
+}
+
+void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x27);
+}
+
+void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rs, int shamt) {
+ EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x00);
+}
+
+void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rs, int shamt) {
+ EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x02);
+}
+
+void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rs, int shamt) {
+ EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x03);
+}
+
+void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x04);
+}
+
+void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x06);
+}
+
+void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x07);
+}
+
+void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x20, rs, rt, imm16);
+}
+
+void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x21, rs, rt, imm16);
+}
+
+void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x23, rs, rt, imm16);
+}
+
+void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x37, rs, rt, imm16);
+}
+
+void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x24, rs, rt, imm16);
+}
+
+void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x25, rs, rt, imm16);
+}
+
+void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
+ EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
+}
+
+void Mips64Assembler::Mfhi(GpuRegister rd) {
+ EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x10);
+}
+
+void Mips64Assembler::Mflo(GpuRegister rd) {
+ EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x12);
+}
+
+void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x28, rs, rt, imm16);
+}
+
+void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x29, rs, rt, imm16);
+}
+
+void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x2b, rs, rt, imm16);
+}
+
+void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x3f, rs, rt, imm16);
+}
+
+void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x2a);
+}
+
+void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x2b);
+}
+
+void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0xa, rs, rt, imm16);
+}
+
+void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0xb, rs, rt, imm16);
+}
+
+void Mips64Assembler::Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x4, rs, rt, imm16);
+ Nop();
+}
+
+void Mips64Assembler::Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x5, rs, rt, imm16);
+ Nop();
+}
+
+void Mips64Assembler::J(uint32_t address) {
+ EmitJ(0x2, address);
+ Nop();
+}
+
+void Mips64Assembler::Jal(uint32_t address) {
+  EmitJ(0x3, address);
+  Nop();
+}
+
+void Mips64Assembler::Jr(GpuRegister rs) {
+ EmitR(0, rs, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), 0, 0x09); // Jalr zero, rs
+ Nop();
+}
+
+void Mips64Assembler::Jalr(GpuRegister rs) {
+ EmitR(0, rs, static_cast<GpuRegister>(0), RA, 0, 0x09);
+ Nop();
+}
+
+void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
+}
+
+void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
+}
+
+void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
+}
+
+void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
+}
+
+void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
+ static_cast<FpuRegister>(fd), 0x0);
+}
+
+void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
+ static_cast<FpuRegister>(fd), 0x1);
+}
+
+void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
+ static_cast<FpuRegister>(fd), 0x2);
+}
+
+void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
+ static_cast<FpuRegister>(fd), 0x3);
+}
+
+void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
+}
+
+void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), static_cast<FpuRegister>(fs),
+ static_cast<FpuRegister>(fd), 0x6);
+}
+
+void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
+ EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
+}
+
+void Mips64Assembler::Mtc1(FpuRegister ft, GpuRegister rs) {
+ EmitFR(0x11, 0x04, ft, static_cast<FpuRegister>(rs), static_cast<FpuRegister>(0), 0x0);
+}
+
+void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);
+}
+
+void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);
+}
+
+void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);
+}
+
+void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
+ EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);
+}
+
+void Mips64Assembler::Break() {
+ EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
+ static_cast<GpuRegister>(0), 0, 0xD);
+}
+
+void Mips64Assembler::Nop() {
+ EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
+ static_cast<GpuRegister>(0), 0, 0x0);
+}
+
+void Mips64Assembler::Move(GpuRegister rt, GpuRegister rs) {
+ EmitI(0x19, rs, rt, 0); // Daddiu
+}
+
+void Mips64Assembler::Clear(GpuRegister rt) {
+ EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rt, 0, 0x20);
+}
+
+void Mips64Assembler::Not(GpuRegister rt, GpuRegister rs) {
+ EmitR(0, static_cast<GpuRegister>(0), rs, rt, 0, 0x27);
+}
+
+void Mips64Assembler::Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ Mult(rs, rt);
+ Mflo(rd);
+}
+
+void Mips64Assembler::Div(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ Div(rs, rt);
+ Mflo(rd);
+}
+
+void Mips64Assembler::Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ Div(rs, rt);
+ Mfhi(rd);
+}
+
+void Mips64Assembler::AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value) {
+ CHECK((value >= -32768) && (value <= 32766));
+ Daddiu(rt, rs, value);
+}
+
+void Mips64Assembler::LoadImmediate64(GpuRegister rt, int32_t value) {
+ CHECK((value >= -32768) && (value <= 32766));
+ Daddiu(rt, ZERO, value);
+}
+
+void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ Lb(reg, base, offset);
+ break;
+ case kLoadUnsignedByte:
+ Lbu(reg, base, offset);
+ break;
+ case kLoadSignedHalfword:
+ Lh(reg, base, offset);
+ break;
+ case kLoadUnsignedHalfword:
+ Lhu(reg, base, offset);
+ break;
+ case kLoadWord:
+ Lw(reg, base, offset);
+ break;
+ case kLoadDoubleword:
+      // TODO: deal with potential alignment issues.
+ Ld(reg, base, offset);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
+ int32_t offset) {
+ CHECK((offset >= -32768) && (offset <= 32766));
+ switch (type) {
+ case kLoadWord:
+ Lwc1(reg, base, offset);
+ break;
+ case kLoadDoubleword:
+      // TODO: deal with potential alignment issues.
+ Ldc1(reg, base, offset);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
+ size_t size) {
+ Mips64ManagedRegister dst = m_dst.AsMips64();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsGpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size) << dst;
+ LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
+ } else if (size == 8) {
+ CHECK_EQ(8u, size) << dst;
+ LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
+ }
+ } else if (dst.IsFpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size) << dst;
+ LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
+ } else if (size == 8) {
+ CHECK_EQ(8u, size) << dst;
+ LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
+ }
+ }
+}
+
+void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kStoreByte:
+ Sb(reg, base, offset);
+ break;
+ case kStoreHalfword:
+ Sh(reg, base, offset);
+ break;
+ case kStoreWord:
+ Sw(reg, base, offset);
+ break;
+ case kStoreDoubleword:
+      // TODO: deal with potential alignment issues.
+ Sd(reg, base, offset);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kStoreWord:
+ Swc1(reg, base, offset);
+ break;
+ case kStoreDoubleword:
+ Sdc1(reg, base, offset);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+constexpr size_t kFramePointerSize = 8;
+
+void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+
+ // Increase frame to required size.
+ IncreaseFrameSize(frame_size);
+
+ // Push callee saves and return address
+ int stack_offset = frame_size - kFramePointerSize;
+ StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
+ for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
+ stack_offset -= kFramePointerSize;
+ GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
+ StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
+ }
+
+  // Write out the StackReference<ArtMethod>; the slot is 32 bits, hence kStoreWord.
+  StoreToOffset(kStoreWord, method_reg.AsMips64().AsGpuRegister(), SP, 0);
+
+ // Write out entry spills.
+ int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ int32_t size = spill.getSize();
+ if (reg.IsNoRegister()) {
+ // only increment stack offset.
+ offset += size;
+ } else if (reg.IsFpuRegister()) {
+ StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsFpuRegister(), SP, offset);
+ offset += size;
+ } else if (reg.IsGpuRegister()) {
+ StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsGpuRegister(), SP, offset);
+ offset += size;
+ }
+ }
+}
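
Concretely, BuildFrame lays the frame out top-down: RA occupies the highest slot, callee saves fill the slots below it, the StackReference<ArtMethod> lands at SP+0 (a 32-bit slot, hence kStoreWord), and entry spills are written above the new frame in the caller's area. A worked layout, assuming a 48-byte frame and two callee saves (illustrative sizes, not from the patch):

    SP + 48 + 4  : first entry spill (past the method slot, in the caller's area)
    SP + 40      : RA
    SP + 32      : callee save [1]
    SP + 24      : callee save [0]
    SP +  0      : StackReference<ArtMethod> (32-bit)
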
+
+void Mips64Assembler::RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+
+ // Pop callee saves and return address
+ int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
+ for (size_t i = 0; i < callee_save_regs.size(); ++i) {
+ GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
+ LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
+ stack_offset += kFramePointerSize;
+ }
+ LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
+
+ // Decrease frame to required size.
+ DecreaseFrameSize(frame_size);
+
+ // Then jump to the return address.
+ Jr(RA);
+}
+
+void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant64(SP, SP, -adjust);
+}
+
+void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant64(SP, SP, adjust);
+}
+
+void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+ Mips64ManagedRegister src = msrc.AsMips64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsGpuRegister()) {
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 8) {
+ StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
+ } else if (size == 4) {
+ StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
+ }
+ } else if (src.IsFpuRegister()) {
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 8) {
+ StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
+ } else if (size == 4) {
+ StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
+ }
+ }
+}
+
+void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ Mips64ManagedRegister src = msrc.AsMips64();
+ CHECK(src.IsGpuRegister());
+ StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
+}
+
+void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ Mips64ManagedRegister src = msrc.AsMips64();
+ CHECK(src.IsGpuRegister());
+ StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
+}
+
+void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ LoadImmediate64(scratch.AsGpuRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
+}
+
+void Mips64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
+ ManagedRegister mscratch) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ LoadImmediate64(scratch.AsGpuRegister(), imm);
+ StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, dest.Int32Value());
+}
+
+void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ AddConstant64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
+}
+
+void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+ StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
+}
+
+void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+ FrameOffset in_off, ManagedRegister mscratch) {
+ Mips64ManagedRegister src = msrc.AsMips64();
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
+}
+
+void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ return EmitLoad(mdest, SP, src.Int32Value(), size);
+}
+
+void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
+ return EmitLoad(mdest, S1, src.Int32Value(), size);
+}
+
+void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ Mips64ManagedRegister dest = mdest.AsMips64();
+ CHECK(dest.IsGpuRegister());
+ LoadFromOffset(kLoadWord, dest.AsGpuRegister(), SP, src.Int32Value());
+}
+
+void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+ MemberOffset offs) {
+ Mips64ManagedRegister dest = mdest.AsMips64();
+  CHECK(dest.IsGpuRegister());
+ LoadFromOffset(kLoadWord, dest.AsGpuRegister(),
+ base.AsMips64().AsGpuRegister(), offs.Int32Value());
+ if (kPoisonHeapReferences) {
+ Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
+ }
+}
+
+void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs) {
+ Mips64ManagedRegister dest = mdest.AsMips64();
+  CHECK(dest.IsGpuRegister()) << dest;
+ LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
+ base.AsMips64().AsGpuRegister(), offs.Int32Value());
+}
+
+void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
+ ThreadOffset<8> offs) {
+ Mips64ManagedRegister dest = mdest.AsMips64();
+ CHECK(dest.IsGpuRegister());
+ LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
+}
+
+void Mips64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips64";
+}
+
+void Mips64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips64";
+}
+
+void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ Mips64ManagedRegister dest = mdest.AsMips64();
+ Mips64ManagedRegister src = msrc.AsMips64();
+ if (!dest.Equals(src)) {
+ if (dest.IsGpuRegister()) {
+ CHECK(src.IsGpuRegister()) << src;
+ Move(dest.AsGpuRegister(), src.AsGpuRegister());
+ } else if (dest.IsFpuRegister()) {
+ CHECK(src.IsFpuRegister()) << src;
+ if (size == 4) {
+ MovS(dest.AsFpuRegister(), src.AsFpuRegister());
+ } else if (size == 8) {
+ MovD(dest.AsFpuRegister(), src.AsFpuRegister());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+ }
+ }
+}
+
+void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
+}
+
+void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
+ ThreadOffset<8> thr_offs,
+ ManagedRegister mscratch) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
+}
+
+void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
+ SP, fr_offs.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
+ S1, thr_offs.Int32Value());
+}
+
+void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch, size_t size) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
+ src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
+ src_offset.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister mscratch, size_t size) {
+ GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest_base.AsMips64().AsGpuRegister(),
+ dest_offset.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
+ dest_offset.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Mips64Assembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+}
+
+void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
+ StoreToOffset(kStoreWord, scratch, dest.AsMips64().AsGpuRegister(), dest_offset.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
+ src_offset.Int32Value());
+ StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
+ dest_offset.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Mips64Assembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/,
+                           Offset /*src_offset*/, ManagedRegister /*mscratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+}
+
+void Mips64Assembler::MemoryBarrier(ManagedRegister) {
+ UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+}
+
+void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg, bool null_allowed) {
+ Mips64ManagedRegister out_reg = mout_reg.AsMips64();
+ Mips64ManagedRegister in_reg = min_reg.AsMips64();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
+ CHECK(out_reg.IsGpuRegister()) << out_reg;
+ if (null_allowed) {
+ Label null_arg;
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ LoadFromOffset(kLoadWord, out_reg.AsGpuRegister(),
+ SP, handle_scope_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate64(out_reg.AsGpuRegister(), 0);
+ }
+ EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
+ AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg, false);
+ } else {
+ AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ }
+}
+
+void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ if (null_allowed) {
+ Label null_arg;
+ LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP,
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ EmitBranch(scratch.AsGpuRegister(), ZERO, &null_arg, true);
+ AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ Bind(&null_arg, false);
+ } else {
+ AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
+ }
+ StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
+}
+
+// Given a handle scope entry, load the associated reference.
+void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ Mips64ManagedRegister out_reg = mout_reg.AsMips64();
+ Mips64ManagedRegister in_reg = min_reg.AsMips64();
+ CHECK(out_reg.IsGpuRegister()) << out_reg;
+ CHECK(in_reg.IsGpuRegister()) << in_reg;
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate64(out_reg.AsGpuRegister(), 0);
+ }
+ EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
+ LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
+ in_reg.AsGpuRegister(), 0);
+ Bind(&null_arg, false);
+}
+
+void Mips64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void Mips64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
+ Mips64ManagedRegister base = mbase.AsMips64();
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(base.IsGpuRegister()) << base;
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
+ base.AsGpuRegister(), offset.Int32Value());
+ Jalr(scratch.AsGpuRegister());
+ // TODO: place reference map on call
+}
+
+void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ CHECK(scratch.IsGpuRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ LoadFromOffset(kLoadWord, scratch.AsGpuRegister(),
+ SP, base.Int32Value());
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
+ scratch.AsGpuRegister(), offset.Int32Value());
+ Jalr(scratch.AsGpuRegister());
+ // TODO: place reference map on call
+}
+
+void Mips64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*mscratch*/) {
+ UNIMPLEMENTED(FATAL) << "no mips64 implementation";
+}
+
+void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
+ Move(tr.AsMips64().AsGpuRegister(), S1);
+}
+
+void Mips64Assembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister /*mscratch*/) {
+ StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
+}
+
+void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+ Mips64ManagedRegister scratch = mscratch.AsMips64();
+ Mips64ExceptionSlowPath* slow = new Mips64ExceptionSlowPath(scratch, stack_adjust);
+ buffer_.EnqueueSlowPath(slow);
+ LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
+ S1, Thread::ExceptionOffset<8>().Int32Value());
+ EmitBranch(scratch.AsGpuRegister(), ZERO, slow->Entry(), false);
+}
+
+void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
+ Mips64Assembler* sp_asm = down_cast<Mips64Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_, false);
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
+ // Pass exception object as argument
+ // Don't care about preserving A0 as this call won't return
+ __ Move(A0, scratch_.AsGpuRegister());
+ // Set up call to Thread::Current()->pDeliverException
+ __ LoadFromOffset(kLoadDoubleword, T9, S1,
+                    QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
+ __ Jr(T9);
+ // Call never returns
+ __ Break();
+#undef __
+}
+
+} // namespace mips64
+} // namespace art
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
new file mode 100644
index 0000000000..36e74d7cb2
--- /dev/null
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
+#define ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
+
+#include <vector>
+
+#include "base/macros.h"
+#include "constants_mips64.h"
+#include "globals.h"
+#include "managed_register_mips64.h"
+#include "utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace mips64 {
+
+enum LoadOperandType {
+ kLoadSignedByte,
+ kLoadUnsignedByte,
+ kLoadSignedHalfword,
+ kLoadUnsignedHalfword,
+ kLoadWord,
+ kLoadDoubleword
+};
+
+enum StoreOperandType {
+ kStoreByte,
+ kStoreHalfword,
+ kStoreWord,
+ kStoreDoubleword
+};
+
+class Mips64Assembler FINAL : public Assembler {
+ public:
+ Mips64Assembler() {}
+ virtual ~Mips64Assembler() {}
+
+ // Emit Machine Instructions.
+ void Add(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Mult(GpuRegister rs, GpuRegister rt);
+ void Multu(GpuRegister rs, GpuRegister rt);
+ void Div(GpuRegister rs, GpuRegister rt);
+ void Divu(GpuRegister rs, GpuRegister rt);
+
+ void And(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Or(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+
+ void Sll(GpuRegister rd, GpuRegister rs, int shamt);
+ void Srl(GpuRegister rd, GpuRegister rs, int shamt);
+ void Sra(GpuRegister rd, GpuRegister rs, int shamt);
+ void Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+
+ void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Lui(GpuRegister rt, uint16_t imm16);
+ void Mfhi(GpuRegister rd);
+ void Mflo(GpuRegister rd);
+
+ void Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+
+ void Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+
+ void Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void J(uint32_t address);
+ void Jal(uint32_t address);
+ void Jr(GpuRegister rs);
+ void Jalr(GpuRegister rs);
+
+ void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void MovS(FpuRegister fd, FpuRegister fs);
+ void MovD(FpuRegister fd, FpuRegister fs);
+
+ void Mfc1(GpuRegister rt, FpuRegister fs);
+ void Mtc1(FpuRegister ft, GpuRegister rs);
+ void Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
+ void Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
+ void Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
+ void Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
+
+ void Break();
+ void Nop();
+ void Move(GpuRegister rt, GpuRegister rs);
+ void Clear(GpuRegister rt);
+ void Not(GpuRegister rt, GpuRegister rs);
+ void Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Div(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+
+ void AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value);
+ void LoadImmediate64(GpuRegister rt, int32_t value);
+
+ void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
+ void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
+ void LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
+ void StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
+ void StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
+
+ // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
+ void Emit(int32_t value);
+ void EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal);
+ void EmitJump(Label* label, bool link);
+ void Bind(Label* label, bool is_jump);
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs) OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister msrc, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
+
+ void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
+ ManagedRegister mscratch) OVERRIDE;
+
+ void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
+
+ void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
+ ManagedRegister mscratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset<8> offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
+ ManagedRegister mscratch) OVERRIDE;
+
+ void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister mscratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be NULL if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the handle scope entry to see if the value is
+  // NULL.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be NULL if the
+  // value is null and null_allowed.
+  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+                              ManagedRegister mscratch, bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread64(ThreadOffset<8> offset, ManagedRegister mscratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to a ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ void EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd, int shamt, int funct);
+ void EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm);
+ void EmitJ(int opcode, int address);
+ void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct);
+ void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm);
+
+ int32_t EncodeBranchOffset(int offset, int32_t inst, bool is_jump);
+ int DecodeBranchOffset(int32_t inst, bool is_jump);
+
+ DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
+};
+
+// Slowpath entered when Thread::Current()->exception_ is non-null.
+class Mips64ExceptionSlowPath FINAL : public SlowPath {
+ public:
+ explicit Mips64ExceptionSlowPath(Mips64ManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const Mips64ManagedRegister scratch_;
+ const size_t stack_adjust_;
+};
+
+} // namespace mips64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
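
Taken together with the trampoline changes earlier in this patch, typical use of this class looks like the following sketch (mirroring mips64::CreateTrampoline above; the load offset is a placeholder):

    std::unique_ptr<Mips64Assembler> assembler(
        static_cast<Mips64Assembler*>(Assembler::Create(kMips64)));
    assembler->LoadFromOffset(kLoadDoubleword, T9, A0, 0 /* placeholder offset */);
    assembler->Jr(T9);
    assembler->Nop();

    size_t cs = assembler->CodeSize();
    std::vector<uint8_t> code(cs);
    MemoryRegion region(code.data(), code.size());
    assembler->FinalizeInstructions(region);  // copy and fix up into 'region'
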
diff --git a/compiler/utils/mips64/constants_mips64.h b/compiler/utils/mips64/constants_mips64.h
new file mode 100644
index 0000000000..8b7697cac3
--- /dev/null
+++ b/compiler/utils/mips64/constants_mips64.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_MIPS64_CONSTANTS_MIPS64_H_
+#define ART_COMPILER_UTILS_MIPS64_CONSTANTS_MIPS64_H_
+
+#include <iosfwd>
+
+#include "arch/mips64/registers_mips64.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+namespace mips64 {
+
+// Constants used for the decoding or encoding of the individual fields of instructions.
+enum InstructionFields {
+ kOpcodeShift = 26,
+ kOpcodeBits = 6,
+ kRsShift = 21,
+ kRsBits = 5,
+ kRtShift = 16,
+ kRtBits = 5,
+ kRdShift = 11,
+ kRdBits = 5,
+ kShamtShift = 6,
+ kShamtBits = 5,
+ kFunctShift = 0,
+ kFunctBits = 6,
+
+ kFmtShift = 21,
+ kFmtBits = 5,
+ kFtShift = 16,
+ kFtBits = 5,
+ kFsShift = 11,
+ kFsBits = 5,
+ kFdShift = 6,
+ kFdBits = 5,
+
+ kBranchOffsetMask = 0x0000ffff,
+ kJumpOffsetMask = 0x03ffffff,
+};
+
+enum ScaleFactor {
+ TIMES_1 = 0,
+ TIMES_2 = 1,
+ TIMES_4 = 2,
+ TIMES_8 = 3
+};
+
+class Instr {
+ public:
+ static const uint32_t kBreakPointInstruction = 0x0000000D;
+
+ bool IsBreakPoint() {
+ return ((*reinterpret_cast<const uint32_t*>(this)) & 0xFC0000CF) == kBreakPointInstruction;
+ }
+
+ // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instr.
+ // Use the At(pc) function to create references to Instr.
+ static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+} // namespace mips64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_MIPS64_CONSTANTS_MIPS64_H_
diff --git a/compiler/utils/mips64/managed_register_mips64.cc b/compiler/utils/mips64/managed_register_mips64.cc
new file mode 100644
index 0000000000..dea396e4a7
--- /dev/null
+++ b/compiler/utils/mips64/managed_register_mips64.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_mips64.h"
+
+#include "globals.h"
+
+namespace art {
+namespace mips64 {
+
+bool Mips64ManagedRegister::Overlaps(const Mips64ManagedRegister& other) const {
+ if (IsNoRegister() || other.IsNoRegister()) return false;
+ CHECK(IsValidManagedRegister());
+ CHECK(other.IsValidManagedRegister());
+  return Equals(other);
+}
+
+void Mips64ManagedRegister::Print(std::ostream& os) const {
+ if (!IsValidManagedRegister()) {
+ os << "No Register";
+ } else if (IsGpuRegister()) {
+ os << "GPU: " << static_cast<int>(AsGpuRegister());
+ } else if (IsFpuRegister()) {
+ os << "FpuRegister: " << static_cast<int>(AsFpuRegister());
+ } else {
+ os << "??: " << RegId();
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const Mips64ManagedRegister& reg) {
+ reg.Print(os);
+ return os;
+}
+
+} // namespace mips64
+} // namespace art
diff --git a/compiler/utils/mips64/managed_register_mips64.h b/compiler/utils/mips64/managed_register_mips64.h
new file mode 100644
index 0000000000..924a928389
--- /dev/null
+++ b/compiler/utils/mips64/managed_register_mips64.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
+#define ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
+
+#include "constants_mips64.h"
+#include "utils/managed_register.h"
+
+namespace art {
+namespace mips64 {
+
+const int kNumberOfGpuRegIds = kNumberOfGpuRegisters;
+const int kNumberOfGpuAllocIds = kNumberOfGpuRegisters;
+
+const int kNumberOfFpuRegIds = kNumberOfFpuRegisters;
+const int kNumberOfFpuAllocIds = kNumberOfFpuRegisters;
+
+const int kNumberOfRegIds = kNumberOfGpuRegIds + kNumberOfFpuRegIds;
+const int kNumberOfAllocIds = kNumberOfGpuAllocIds + kNumberOfFpuAllocIds;
+
+// An instance of class 'ManagedRegister' represents a single GPU register (enum
+// GpuRegister) or a double-precision FPU register (enum FpuRegister).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class Mips64ManagedRegister : public ManagedRegister {
+ public:
+ GpuRegister AsGpuRegister() const {
+ CHECK(IsGpuRegister());
+ return static_cast<GpuRegister>(id_);
+ }
+
+ FpuRegister AsFpuRegister() const {
+ CHECK(IsFpuRegister());
+ return static_cast<FpuRegister>(id_ - kNumberOfGpuRegIds);
+ }
+
+ bool IsGpuRegister() const {
+ CHECK(IsValidManagedRegister());
+ return (0 <= id_) && (id_ < kNumberOfGpuRegIds);
+ }
+
+ bool IsFpuRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - kNumberOfGpuRegIds;
+ return (0 <= test) && (test < kNumberOfFpuRegIds);
+ }
+
+ void Print(std::ostream& os) const;
+
+ // Returns true if the two managed-registers ('this' and 'other') overlap.
+ // Either managed-register may be the NoRegister. If both are the NoRegister
+ // then false is returned.
+ bool Overlaps(const Mips64ManagedRegister& other) const;
+
+ static Mips64ManagedRegister FromGpuRegister(GpuRegister r) {
+ CHECK_NE(r, kNoGpuRegister);
+ return FromRegId(r);
+ }
+
+ static Mips64ManagedRegister FromFpuRegister(FpuRegister r) {
+ CHECK_NE(r, kNoFpuRegister);
+ return FromRegId(r + kNumberOfGpuRegIds);
+ }
+
+ private:
+ bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
+ int RegId() const {
+ CHECK(!IsNoRegister());
+ return id_;
+ }
+
+ int AllocId() const {
+ CHECK(IsValidManagedRegister());
+ CHECK_LT(id_, kNumberOfAllocIds);
+ return id_;
+ }
+
+ int AllocIdLow() const;
+ int AllocIdHigh() const;
+
+ friend class ManagedRegister;
+
+ explicit Mips64ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+ static Mips64ManagedRegister FromRegId(int reg_id) {
+ Mips64ManagedRegister reg(reg_id);
+ CHECK(reg.IsValidManagedRegister());
+ return reg;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const Mips64ManagedRegister& reg);
+
+} // namespace mips64
+
+inline mips64::Mips64ManagedRegister ManagedRegister::AsMips64() const {
+ mips64::Mips64ManagedRegister reg(id_);
+ CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+ return reg;
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
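
For illustration, GPU ids occupy [0, kNumberOfGpuRegIds) and FPU ids follow immediately after. Assuming 32 GPU registers in registers_mips64.h (outside this diff), with A0 being GPU register 4 and F0 FPU register 0, the mapping works out as:

    Mips64ManagedRegister a0 = Mips64ManagedRegister::FromGpuRegister(A0);  // id_ == 4
    Mips64ManagedRegister f0 = Mips64ManagedRegister::FromFpuRegister(F0);  // id_ == 32
    // AsFpuRegister() undoes the bias: static_cast<FpuRegister>(id_ - kNumberOfGpuRegIds).
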