Diffstat (limited to 'compiler')
-rw-r--r--  compiler/dex/compiler_enums.h                    18
-rw-r--r--  compiler/dex/frontend.cc                          4
-rw-r--r--  compiler/dex/mir_analysis.cc                     10
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc             4
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc             2
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc         4
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc           4
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc           14
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc               35
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h              6
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc                 31
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc                42
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc            351
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc            14
-rw-r--r--  compiler/dex/quick/x86/x86_lir.h                 22
-rw-r--r--  compiler/dex/reg_storage.h                        3
-rw-r--r--  compiler/image_writer.cc                          4
-rw-r--r--  compiler/optimizing/builder.cc                   58
-rw-r--r--  compiler/optimizing/builder.h                     5
-rw-r--r--  compiler/optimizing/code_generator.cc            10
-rw-r--r--  compiler/optimizing/code_generator_arm.cc       164
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc      19
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       173
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    196
-rw-r--r--  compiler/optimizing/graph_checker.cc             42
-rw-r--r--  compiler/optimizing/graph_checker.h              11
-rw-r--r--  compiler/optimizing/locations.h                  15
-rw-r--r--  compiler/optimizing/nodes.h                      42
-rw-r--r--  compiler/optimizing/register_allocator.cc         4
-rw-r--r--  compiler/optimizing/register_allocator_test.cc    6
30 files changed, 890 insertions(+), 423 deletions(-)
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 1297ba9c7f..5d877fdf80 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -38,6 +38,7 @@ enum BitsUsed {
kSize512Bits,
kSize1024Bits,
};
+std::ostream& operator<<(std::ostream& os, const BitsUsed& rhs);
enum SpecialTargetRegister {
kSelf, // Thread pointer.
@@ -76,6 +77,7 @@ enum SpecialTargetRegister {
kHiddenFpArg,
kCount
};
+std::ostream& operator<<(std::ostream& os, const SpecialTargetRegister& code);
enum RegLocationType {
kLocDalvikFrame = 0, // Normal Dalvik register
@@ -344,6 +346,7 @@ enum AssemblerStatus {
kSuccess,
kRetryAll,
};
+std::ostream& operator<<(std::ostream& os, const AssemblerStatus& rhs);
enum OpSize {
kWord, // Natural word size of target (32/64).
@@ -357,7 +360,6 @@ enum OpSize {
kUnsignedByte,
kSignedByte,
};
-
std::ostream& operator<<(std::ostream& os, const OpSize& kind);
enum OpKind {
@@ -399,6 +401,7 @@ enum OpKind {
kOpBx,
kOpInvalid,
};
+std::ostream& operator<<(std::ostream& os, const OpKind& rhs);
enum MoveType {
kMov8GP, // Move 8-bit general purpose register.
@@ -415,8 +418,7 @@ enum MoveType {
kMovLo128FP, // Move low 64-bits of 128-bit FP register.
kMovHi128FP, // Move high 64-bits of 128-bit FP register.
};
-
-std::ostream& operator<<(std::ostream& os, const OpKind& kind);
+std::ostream& operator<<(std::ostream& os, const MoveType& kind);
enum ConditionCode {
kCondEq, // equal
@@ -438,7 +440,6 @@ enum ConditionCode {
kCondAl, // always
kCondNv, // never
};
-
std::ostream& operator<<(std::ostream& os, const ConditionCode& kind);
// Target specific condition encodings
@@ -460,7 +461,6 @@ enum ArmConditionCode {
kArmCondAl = 0xe, // 1110
kArmCondNv = 0xf, // 1111
};
-
std::ostream& operator<<(std::ostream& os, const ArmConditionCode& kind);
enum X86ConditionCode {
@@ -508,7 +508,6 @@ enum X86ConditionCode {
kX86CondNle = 0xf, // not-less-than
kX86CondG = kX86CondNle, // greater
};
-
std::ostream& operator<<(std::ostream& os, const X86ConditionCode& kind);
enum DividePattern {
@@ -517,7 +516,6 @@ enum DividePattern {
Divide5,
Divide7,
};
-
std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
/**
@@ -543,7 +541,6 @@ enum MemBarrierKind {
kAnyAny,
kNTStoreStore,
};
-
std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
enum OpFeatureFlags {
@@ -600,6 +597,7 @@ enum OpFeatureFlags {
kDefHi,
kDefLo
};
+std::ostream& operator<<(std::ostream& os, const OpFeatureFlags& rhs);
enum SelectInstructionKind {
kSelectNone,
@@ -607,7 +605,6 @@ enum SelectInstructionKind {
kSelectMove,
kSelectGoto
};
-
std::ostream& operator<<(std::ostream& os, const SelectInstructionKind& kind);
// LIR fixup kinds for Arm
@@ -629,14 +626,12 @@ enum FixupKind {
kFixupMovImmHST, // kThumb2MovImm16HST.
kFixupAlign4, // Align to 4-byte boundary.
};
-
std::ostream& operator<<(std::ostream& os, const FixupKind& kind);
enum VolatileKind {
kNotVolatile, // Load/Store is not volatile
kVolatile // Load/Store is volatile
};
-
std::ostream& operator<<(std::ostream& os, const VolatileKind& kind);
enum WideKind {
@@ -644,7 +639,6 @@ enum WideKind {
kWide, // Wide view
kRef // Ref width
};
-
std::ostream& operator<<(std::ostream& os, const WideKind& kind);
} // namespace art
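The hunks above only declare the stream operators; their definitions live elsewhere in the tree. As a rough illustration of what such a definition might look like (a hypothetical sketch, not code from this patch):

    #include <ostream>

    enum AssemblerStatus { kSuccess, kRetryAll };  // abbreviated copy of the enum above

    // Sketch: stream the enumerator name so LOG and DCHECK_EQ output stays readable.
    std::ostream& operator<<(std::ostream& os, const AssemblerStatus& rhs) {
      switch (rhs) {
        case kSuccess:  return os << "kSuccess";
        case kRetryAll: return os << "kRetryAll";
      }
      return os << "AssemblerStatus[" << static_cast<int>(rhs) << "]";
    }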
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index a1e2caa651..3f6231cb1f 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -88,9 +88,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
return nullptr;
}
- if (!driver.GetCompilerOptions().IsCompilationEnabled()) {
- return nullptr;
- }
+ DCHECK(driver.GetCompilerOptions().IsCompilationEnabled());
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
CompilationUnit cu(driver.GetArenaPool());
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index ee48796996..44f69ba674 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1112,14 +1112,11 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
return true;
}
- if (!compiler_options.IsCompilationEnabled()) {
- *skip_message = "Compilation disabled";
- return true;
- }
+ DCHECK(compiler_options.IsCompilationEnabled());
// Set up compilation cutoffs based on current filter mode.
- size_t small_cutoff = 0;
- size_t default_cutoff = 0;
+ size_t small_cutoff;
+ size_t default_cutoff;
switch (compiler_filter) {
case CompilerOptions::kBalanced:
small_cutoff = compiler_options.GetSmallMethodThreshold();
@@ -1136,6 +1133,7 @@ bool MIRGraph::SkipCompilation(std::string* skip_message) {
break;
default:
LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
+ UNREACHABLE();
}
// If size < cutoff, assume we'll compile - but allow removal.
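Dropping the dummy "= 0" initializers works because the new UNREACHABLE() after LOG(FATAL) marks the default case as non-returning, so every reachable path out of the switch assigns both cutoffs. A standalone sketch of the same pattern, with placeholder names standing in for the real ART macros:

    #include <cstddef>
    #include <cstdlib>

    // Stand-in for ART's UNREACHABLE(): tells the compiler (and
    // -Wmaybe-uninitialized) that control cannot continue past this point.
    #define UNREACHABLE_STUB() __builtin_unreachable()

    size_t CutoffFor(int filter, size_t small_cutoff, size_t huge_cutoff) {
      size_t cutoff;                   // no dummy initializer needed...
      switch (filter) {
        case 0: cutoff = small_cutoff; break;
        case 1: cutoff = huge_cutoff;  break;
        default:
          std::abort();                // stands in for LOG(FATAL)
          UNREACHABLE_STUB();          // ...because no path falls through unassigned.
      }
      return cutoff;
    }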
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index a1a5ad1d1f..0d5aa90f35 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -959,7 +959,7 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rARM_SP);
+ DCHECK_EQ(r_base, rs_rARM_SP);
AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
@@ -1088,7 +1088,7 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
// TODO: In future, may need to differentiate Dalvik & spill accesses
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rARM_SP);
+ DCHECK_EQ(r_base, rs_rARM_SP);
AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;
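The DCHECK(a == b) to DCHECK_EQ(a, b) conversions here (and in the arm64, mips and x86 files below) make a failing check print both operand values, which is also why this patch ends by giving RegStorage an operator<<. A simplified sketch of the behavioural difference, using a toy macro rather than the real ART one:

    #include <cstdlib>
    #include <iostream>

    // Toy version of DCHECK_EQ: printing the operands is what requires an
    // operator<< for the compared type (a plain DCHECK only prints the expression).
    #define TOY_DCHECK_EQ(a, b)                                               \
      do {                                                                    \
        if (!((a) == (b))) {                                                  \
          std::cerr << "Check failed: " #a " == " #b " (" << (a) << " vs. "   \
                    << (b) << ")" << std::endl;                               \
          std::abort();                                                       \
        }                                                                     \
      } while (0)

    int main() {
      int r_base = 4;
      int r_sp = 13;
      TOY_DCHECK_EQ(r_base, r_sp);  // "Check failed: r_base == r_sp (4 vs. 13)"
      return 0;
    }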
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index fc72e02c55..e57f99ce9b 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -1663,7 +1663,7 @@ static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32
void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
int frame_size) {
- DCHECK(base == rs_sp);
+ DCHECK_EQ(base, rs_sp);
// Restore saves and drop stack frame.
// 2 versions:
//
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index fcd69ec4a3..78a6df8a1c 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -1266,7 +1266,7 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_sp);
+ DCHECK_EQ(r_base, rs_sp);
AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
}
return load;
@@ -1357,7 +1357,7 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
// TODO: In future, may need to differentiate Dalvik & spill accesses.
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_sp);
+ DCHECK_EQ(r_base, rs_sp);
AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
}
return store;
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 044972cc5f..a7dc84f6aa 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -544,7 +544,7 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rMIPS_SP);
+ DCHECK_EQ(r_base, rs_rMIPS_SP);
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, pair /* is64bit */);
if (pair) {
@@ -646,7 +646,7 @@ LIR* MipsMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement,
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rMIPS_SP);
+ DCHECK_EQ(r_base, rs_rMIPS_SP);
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, pair /* is64bit */);
if (pair) {
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index ef55054d6d..3933b21a26 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -677,7 +677,7 @@ size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int
++size; // modrm
}
if (!modrm_is_reg_reg) {
- if (has_sib || LowRegisterBits(raw_base) == rs_rX86_SP.GetRegNum()
+ if (has_sib || (LowRegisterBits(raw_base) == rs_rX86_SP_32.GetRegNum())
|| (cu_->target64 && entry->skeleton.prefix1 == THREAD_PREFIX)) {
// SP requires a SIB byte.
// GS access also needs a SIB byte for absolute adressing in 64-bit mode.
@@ -1010,9 +1010,9 @@ void X86Mir2Lir::EmitDisp(uint8_t base, int32_t disp) {
void X86Mir2Lir::EmitModrmThread(uint8_t reg_or_opcode) {
if (cu_->target64) {
// Absolute adressing for GS access.
- uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP.GetRegNum();
+ uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
- uint8_t sib = (0/*TIMES_1*/ << 6) | (rs_rX86_SP.GetRegNum() << 3) | rs_rBP.GetRegNum();
+ uint8_t sib = (0/*TIMES_1*/ << 6) | (rs_rX86_SP_32.GetRegNum() << 3) | rs_rBP.GetRegNum();
code_buffer_.push_back(sib);
} else {
uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rBP.GetRegNum();
@@ -1025,9 +1025,9 @@ void X86Mir2Lir::EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int32_t disp
DCHECK_LT(base, 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base;
code_buffer_.push_back(modrm);
- if (base == rs_rX86_SP.GetRegNum()) {
+ if (base == rs_rX86_SP_32.GetRegNum()) {
// Special SIB for SP base
- code_buffer_.push_back(0 << 6 | rs_rX86_SP.GetRegNum() << 3 | rs_rX86_SP.GetRegNum());
+ code_buffer_.push_back(0 << 6 | rs_rX86_SP_32.GetRegNum() << 3 | rs_rX86_SP_32.GetRegNum());
}
EmitDisp(base, disp);
}
@@ -1036,7 +1036,7 @@ void X86Mir2Lir::EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t i
int scale, int32_t disp) {
DCHECK_LT(RegStorage::RegNum(reg_or_opcode), 8);
uint8_t modrm = (ModrmForDisp(base, disp) << 6) | RegStorage::RegNum(reg_or_opcode) << 3 |
- rs_rX86_SP.GetRegNum();
+ rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
DCHECK_LT(scale, 4);
DCHECK_LT(RegStorage::RegNum(index), 8);
@@ -1584,7 +1584,7 @@ void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, int32_t raw_reg, int32_t
DCHECK_EQ(0, entry->skeleton.extra_opcode1);
DCHECK_EQ(0, entry->skeleton.extra_opcode2);
uint8_t low_reg = LowRegisterBits(raw_reg);
- uint8_t modrm = (2 << 6) | (low_reg << 3) | rs_rX86_SP.GetRegNum();
+ uint8_t modrm = (2 << 6) | (low_reg << 3) | rs_rX86_SP_32.GetRegNum();
code_buffer_.push_back(modrm);
DCHECK_LT(scale, 4);
uint8_t low_base_or_table = LowRegisterBits(raw_base_or_table);
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 497ef94c27..61dcc28afc 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -164,16 +164,20 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
* expanding the frame or flushing. This leaves the utility
* code with no spare temps.
*/
- LockTemp(rs_rX86_ARG0);
- LockTemp(rs_rX86_ARG1);
- LockTemp(rs_rX86_ARG2);
+ const RegStorage arg0 = TargetReg32(kArg0);
+ const RegStorage arg1 = TargetReg32(kArg1);
+ const RegStorage arg2 = TargetReg32(kArg2);
+ LockTemp(arg0);
+ LockTemp(arg1);
+ LockTemp(arg2);
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
+ const InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
// If we doing an implicit stack overflow check, perform the load immediately
// before the stack pointer is decremented and anything is saved.
@@ -182,12 +186,12 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// Implicit stack overflow check.
// test eax,[esp + -overflow]
int overflow = GetStackOverflowReservedBytes(isa);
- NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
+ NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rSP.GetReg(), -overflow);
MarkPossibleStackOverflowException();
}
/* Build frame, return address already on stack */
- stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
+ stack_decrement_ = OpRegImm(kOpSub, rs_rSP, frame_size_ -
GetInstructionSetPointerSize(cu_->instruction_set));
NewLIR0(kPseudoMethodEntry);
@@ -204,7 +208,8 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
+ const RegStorage local_rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ m2l_->OpRegImm(kOpAdd, local_rs_rSP, sp_displace_);
m2l_->ClobberCallerSave();
// Assumes codegen and target are in thumb2 mode.
m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
@@ -225,9 +230,9 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// may have moved us outside of the reserved area at the end of the stack.
// cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
if (cu_->target64) {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
+ OpRegThreadMem(kOpCmp, rs_rX86_SP_64, Thread::StackEndOffset<8>());
} else {
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
+ OpRegThreadMem(kOpCmp, rs_rX86_SP_32, Thread::StackEndOffset<4>());
}
LIR* branch = OpCondBranch(kCondUlt, nullptr);
AddSlowPath(
@@ -245,13 +250,13 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
+ setup_method_address_[1] = StoreBaseDisp(rs_rSP, displacement, method_start,
cu_->target64 ? k64 : k32, kNotVolatile);
}
- FreeTemp(rs_rX86_ARG0);
- FreeTemp(rs_rX86_ARG1);
- FreeTemp(rs_rX86_ARG2);
+ FreeTemp(arg0);
+ FreeTemp(arg1);
+ FreeTemp(arg2);
}
void X86Mir2Lir::GenExitSequence() {
@@ -266,7 +271,9 @@ void X86Mir2Lir::GenExitSequence() {
UnSpillCoreRegs();
UnSpillFPRegs();
/* Remove frame except for return address */
- stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ stack_increment_ = OpRegImm(kOpAdd, rs_rSP,
+ frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
NewLIR0(kX86Ret);
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 4412a1e254..d57dffb01d 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -389,7 +389,7 @@ class X86Mir2Lir : public Mir2Lir {
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
protected:
- RegStorage TargetReg32(SpecialTargetRegister reg);
+ RegStorage TargetReg32(SpecialTargetRegister reg) const;
// Casting of RegStorage
RegStorage As32BitReg(RegStorage reg) {
DCHECK(!reg.IsPair());
@@ -432,7 +432,7 @@ class X86Mir2Lir : public Mir2Lir {
LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
RegStorage r_src, OpSize size, int opt_flags = 0);
- RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
+ RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num) const;
int AssignInsnOffsets();
void AssignOffsets();
@@ -530,7 +530,7 @@ class X86Mir2Lir : public Mir2Lir {
* @brief Check if a register is byte addressable.
* @returns true if a register is byte addressable.
*/
- bool IsByteRegister(RegStorage reg);
+ bool IsByteRegister(RegStorage reg) const;
void GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src, int64_t imm, bool is_div);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 33bb0eeb76..bc02eee669 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -159,12 +159,13 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(rs_rX86_SP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ StoreBaseDisp(rs_rSP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
}
}
// Push the source virtual register onto the x87 stack.
- LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP.GetReg(),
+ LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP_32.GetReg(),
src_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
@@ -172,7 +173,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
// Now pop off x87 stack and store it in the destination VR's stack location.
int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
- LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
+ LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP_32.GetReg(), displacement);
AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);
/*
@@ -191,12 +192,13 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
* correct register class.
*/
rl_result = EvalLoc(rl_dest, kFPReg, true);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (is_double) {
- LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
@@ -366,6 +368,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
// If the source is in physical register, then put it in its location on stack.
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (rl_src1.location == kLocPhysReg) {
RegisterInfo* reg_info = GetRegInfo(rl_src1.reg);
@@ -377,7 +380,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(rs_rX86_SP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rSP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -388,7 +391,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
FlushSpecificReg(reg_info);
ResetDef(rl_src2.reg);
} else {
- StoreBaseDisp(rs_rX86_SP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rSP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -396,12 +399,12 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
int fld_opcode = is_double ? kX86Fld64M : kX86Fld32M;
// Push the source virtual registers onto the x87 stack.
- LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
+ LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
src2_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_2, (src2_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
- LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
+ LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rSP.GetReg(),
src1_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_1, (src1_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
@@ -430,7 +433,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
// Now store result in the destination VR's stack location.
int displacement = dest_v_reg_offset + LOWORD_OFFSET;
int opcode = is_double ? kX86Fst64M : kX86Fst32M;
- LIR *fst = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
+ LIR *fst = NewLIR2NoDest(opcode, rs_rSP.GetReg(), displacement);
AnnotateDalvikRegAccess(fst, displacement >> 2, false /* is_load */, is_double /* is64bit */);
// Pop ST(1) and ST(0).
@@ -448,10 +451,10 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rSP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rSP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
}
@@ -639,7 +642,7 @@ bool X86Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement, 0x7fffffff);
AnnotateDalvikRegAccess(lir, displacement >> 2, false /*is_load */, false /* is_64bit */);
AnnotateDalvikRegAccess(lir, displacement >> 2, true /* is_load */, false /* is_64bit*/);
return true;
@@ -703,7 +706,7 @@ bool X86Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, true /* is_load */, true /* is_64bit*/);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, false /*is_load */, true /* is_64bit */);
return true;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 26465a5568..781c12807b 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -1124,15 +1124,16 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
if (!obj_in_si && !obj_in_di) {
- LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
+ LoadWordDisp(rs_rSP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
}
if (!off_in_si && !off_in_di) {
- LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
+ LoadWordDisp(rs_rSP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
@@ -1507,12 +1508,14 @@ void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int
case 0:
NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg());
break;
- case 1:
- LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, kNotVolatile);
+ case 1: {
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ LoadBaseDisp(rs_rSP, displacement, dest, k32, kNotVolatile);
break;
+ }
default:
m = NewLIR4(IS_SIMM8(val) ? kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(),
- rs_rX86_SP.GetReg(), displacement, val);
+ rs_rX86_SP_32.GetReg(), displacement, val);
AnnotateDalvikRegAccess(m, displacement >> 2, true /* is_load */, true /* is_64bit */);
break;
}
@@ -1653,7 +1656,7 @@ bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64
if (src1_in_reg) {
NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
- LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
+ LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
}
@@ -1719,12 +1722,13 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// At this point, the VRs are in their home locations.
bool src1_in_reg = rl_src1.location == kLocPhysReg;
bool src2_in_reg = rl_src2.location == kLocPhysReg;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
// ECX <- 1H
if (src1_in_reg) {
NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32,
kNotVolatile);
}
@@ -1735,7 +1739,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
- LIR* m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+ LIR* m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1748,7 +1752,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32,
kNotVolatile);
}
@@ -1757,7 +1761,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Imul32RR, rs_r0.GetReg(), rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
- LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86Imul32RM, rs_r0.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1768,7 +1772,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR2(kX86Imul32RR, rs_r1.GetReg(), rl_src2.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
- LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86Imul32RM, rs_r1.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
@@ -1782,7 +1786,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
if (src2_in_reg) {
NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg());
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
+ LoadBaseDisp(rs_rSP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32,
kNotVolatile);
}
@@ -1791,7 +1795,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
NewLIR1(kX86Mul32DaR, rl_src1.reg.GetLowReg());
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
- LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP.GetReg(), displacement + LOWORD_OFFSET);
+ LIR *m = NewLIR2(kX86Mul32DaM, rs_rX86_SP_32.GetReg(), displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
}
@@ -1833,7 +1837,7 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
// RHS is in memory.
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1876,7 +1880,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
// Operate directly into memory.
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2106,7 +2110,7 @@ void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
int displacement = SRegOffset(rl_src.s_reg_low);
// RDX:RAX = magic * numerator.
- LIR *m = NewLIR2(kX86Imul64DaM, rs_rX86_SP.GetReg(), displacement);
+ LIR *m = NewLIR2(kX86Imul64DaM, rs_rX86_SP_32.GetReg(), displacement);
AnnotateDalvikRegAccess(m, displacement >> 2,
true /* is_load */, true /* is_64bit */);
} else {
@@ -2723,7 +2727,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2754,7 +2758,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -3198,7 +3202,7 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
} else {
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP.GetReg(),
+ LIR *m = NewLIR3(kX86MovsxdRM, rl_result.reg.GetReg(), rs_rX86_SP_32.GetReg(),
displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is_64bit */);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 270a4e5007..db2f272436 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -141,27 +141,6 @@ static constexpr ArrayRef<const RegStorage> dp_temps_64(dp_temps_arr_64);
static constexpr ArrayRef<const RegStorage> xp_temps_32(xp_temps_arr_32);
static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
-RegStorage rs_rX86_SP;
-
-RegStorage rs_rX86_ARG0;
-RegStorage rs_rX86_ARG1;
-RegStorage rs_rX86_ARG2;
-RegStorage rs_rX86_ARG3;
-RegStorage rs_rX86_ARG4;
-RegStorage rs_rX86_ARG5;
-RegStorage rs_rX86_FARG0;
-RegStorage rs_rX86_FARG1;
-RegStorage rs_rX86_FARG2;
-RegStorage rs_rX86_FARG3;
-RegStorage rs_rX86_FARG4;
-RegStorage rs_rX86_FARG5;
-RegStorage rs_rX86_FARG6;
-RegStorage rs_rX86_FARG7;
-RegStorage rs_rX86_RET0;
-RegStorage rs_rX86_RET1;
-RegStorage rs_rX86_INVOKE_TGT;
-RegStorage rs_rX86_COUNT;
-
RegLocation X86Mir2Lir::LocCReturn() {
return x86_loc_c_return;
}
@@ -182,39 +161,94 @@ RegLocation X86Mir2Lir::LocCReturnDouble() {
return x86_loc_c_return_double;
}
+// 32-bit reg storage locations for 32-bit targets.
+static const RegStorage RegStorage32FromSpecialTargetRegister_Target32[] {
+ RegStorage::InvalidReg(), // kSelf - Thread pointer.
+ RegStorage::InvalidReg(), // kSuspend - Used to reduce suspend checks for some targets.
+ RegStorage::InvalidReg(), // kLr - no register as the return address is pushed on entry.
+ RegStorage::InvalidReg(), // kPc - not exposed on X86 see kX86StartOfMethod.
+ rs_rX86_SP_32, // kSp
+ rs_rAX, // kArg0
+ rs_rCX, // kArg1
+ rs_rDX, // kArg2
+ rs_rBX, // kArg3
+ RegStorage::InvalidReg(), // kArg4
+ RegStorage::InvalidReg(), // kArg5
+ RegStorage::InvalidReg(), // kArg6
+ RegStorage::InvalidReg(), // kArg7
+ rs_rAX, // kFArg0
+ rs_rCX, // kFArg1
+ rs_rDX, // kFArg2
+ rs_rBX, // kFArg3
+ RegStorage::InvalidReg(), // kFArg4
+ RegStorage::InvalidReg(), // kFArg5
+ RegStorage::InvalidReg(), // kFArg6
+ RegStorage::InvalidReg(), // kFArg7
+ RegStorage::InvalidReg(), // kFArg8
+ RegStorage::InvalidReg(), // kFArg9
+ RegStorage::InvalidReg(), // kFArg10
+ RegStorage::InvalidReg(), // kFArg11
+ RegStorage::InvalidReg(), // kFArg12
+ RegStorage::InvalidReg(), // kFArg13
+ RegStorage::InvalidReg(), // kFArg14
+ RegStorage::InvalidReg(), // kFArg15
+ rs_rAX, // kRet0
+ rs_rDX, // kRet1
+ rs_rAX, // kInvokeTgt
+ rs_rAX, // kHiddenArg - used to hold the method index before copying to fr0.
+ rs_fr0, // kHiddenFpArg
+ rs_rCX, // kCount
+};
+
+// 32-bit reg storage locations for 64-bit targets.
+static const RegStorage RegStorage32FromSpecialTargetRegister_Target64[] {
+ RegStorage::InvalidReg(), // kSelf - Thread pointer.
+ RegStorage::InvalidReg(), // kSuspend - Used to reduce suspend checks for some targets.
+ RegStorage::InvalidReg(), // kLr - no register as the return address is pushed on entry.
+ RegStorage::InvalidReg(), // kPc - TODO: RIP based addressing.
+ rs_rX86_SP_32, // kSp
+ rs_rDI, // kArg0
+ rs_rSI, // kArg1
+ rs_rDX, // kArg2
+ rs_rCX, // kArg3
+ rs_r8, // kArg4
+ rs_r9, // kArg5
+ RegStorage::InvalidReg(), // kArg6
+ RegStorage::InvalidReg(), // kArg7
+ rs_fr0, // kFArg0
+ rs_fr1, // kFArg1
+ rs_fr2, // kFArg2
+ rs_fr3, // kFArg3
+ rs_fr4, // kFArg4
+ rs_fr5, // kFArg5
+ rs_fr6, // kFArg6
+ rs_fr7, // kFArg7
+ RegStorage::InvalidReg(), // kFArg8
+ RegStorage::InvalidReg(), // kFArg9
+ RegStorage::InvalidReg(), // kFArg10
+ RegStorage::InvalidReg(), // kFArg11
+ RegStorage::InvalidReg(), // kFArg12
+ RegStorage::InvalidReg(), // kFArg13
+ RegStorage::InvalidReg(), // kFArg14
+ RegStorage::InvalidReg(), // kFArg15
+ rs_rAX, // kRet0
+ rs_rDX, // kRet1
+ rs_rAX, // kInvokeTgt
+ rs_rAX, // kHiddenArg
+ RegStorage::InvalidReg(), // kHiddenFpArg
+ rs_rCX, // kCount
+};
+static_assert(arraysize(RegStorage32FromSpecialTargetRegister_Target32) ==
+ arraysize(RegStorage32FromSpecialTargetRegister_Target64),
+ "Mismatch in RegStorage array sizes");
+
// Return a target-dependent special register for 32-bit.
-RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
- RegStorage res_reg = RegStorage::InvalidReg();
- switch (reg) {
- case kSelf: res_reg = RegStorage::InvalidReg(); break;
- case kSuspend: res_reg = RegStorage::InvalidReg(); break;
- case kLr: res_reg = RegStorage::InvalidReg(); break;
- case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_rX86_SP_32; break; // This must be the concrete one, as _SP is target-
- // specific size.
- case kArg0: res_reg = rs_rX86_ARG0; break;
- case kArg1: res_reg = rs_rX86_ARG1; break;
- case kArg2: res_reg = rs_rX86_ARG2; break;
- case kArg3: res_reg = rs_rX86_ARG3; break;
- case kArg4: res_reg = rs_rX86_ARG4; break;
- case kArg5: res_reg = rs_rX86_ARG5; break;
- case kFArg0: res_reg = rs_rX86_FARG0; break;
- case kFArg1: res_reg = rs_rX86_FARG1; break;
- case kFArg2: res_reg = rs_rX86_FARG2; break;
- case kFArg3: res_reg = rs_rX86_FARG3; break;
- case kFArg4: res_reg = rs_rX86_FARG4; break;
- case kFArg5: res_reg = rs_rX86_FARG5; break;
- case kFArg6: res_reg = rs_rX86_FARG6; break;
- case kFArg7: res_reg = rs_rX86_FARG7; break;
- case kRet0: res_reg = rs_rX86_RET0; break;
- case kRet1: res_reg = rs_rX86_RET1; break;
- case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
- case kHiddenArg: res_reg = rs_rAX; break;
- case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
- case kCount: res_reg = rs_rX86_COUNT; break;
- default: res_reg = RegStorage::InvalidReg();
- }
- return res_reg;
+RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) const {
+ DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target32[kCount], rs_rCX);
+ DCHECK_EQ(RegStorage32FromSpecialTargetRegister_Target64[kCount], rs_rCX);
+ DCHECK_LT(reg, arraysize(RegStorage32FromSpecialTargetRegister_Target32));
+ return cu_->target64 ? RegStorage32FromSpecialTargetRegister_Target64[reg]
+ : RegStorage32FromSpecialTargetRegister_Target32[reg];
}
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
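Replacing the switch with two constant arrays indexed by SpecialTargetRegister keeps the 32-bit and 64-bit mappings side by side as data, guarded by the static_assert on their sizes. A cut-down sketch of the lookup pattern with toy types (not the real RegStorage or SpecialTargetRegister definitions):

    #include <cassert>
    #include <cstddef>

    enum SpecialReg { kSpToy, kArg0Toy, kArg1Toy, kToyCount };  // toy subset

    struct Reg { int num; };  // toy stand-in for RegStorage

    static const Reg kTarget32[kToyCount] = {{4}, {0}, {1}};  // e.g. esp, eax, ecx
    static const Reg kTarget64[kToyCount] = {{4}, {7}, {6}};  // e.g. rsp, rdi, rsi

    Reg ToyTargetReg32(SpecialReg reg, bool target64) {
      assert(static_cast<size_t>(reg) < kToyCount);  // mirrors the DCHECK_LT above
      return target64 ? kTarget64[reg] : kTarget32[reg];
    }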
@@ -433,7 +467,7 @@ void X86Mir2Lir::AdjustSpillMask() {
RegStorage X86Mir2Lir::AllocateByteRegister() {
RegStorage reg = AllocTypedTemp(false, kCoreReg);
if (!cu_->target64) {
- DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
+ DCHECK_LT(reg.GetRegNum(), rs_rX86_SP_32.GetRegNum());
}
return reg;
}
@@ -442,8 +476,8 @@ RegStorage X86Mir2Lir::Get128BitRegister(RegStorage reg) {
return GetRegInfo(reg)->Master()->GetReg();
}
-bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
- return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
+bool X86Mir2Lir::IsByteRegister(RegStorage reg) const {
+ return cu_->target64 || reg.GetRegNum() < rs_rX86_SP_32.GetRegNum();
}
/* Clobber all regs that might be used by an external C call */
@@ -483,8 +517,8 @@ void X86Mir2Lir::ClobberCallerSave() {
RegLocation X86Mir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
- DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
+ DCHECK_EQ(res.reg.GetLowReg(), rs_rAX.GetReg());
+ DCHECK_EQ(res.reg.GetHighReg(), rs_rDX.GetReg());
Clobber(rs_rAX);
Clobber(rs_rDX);
MarkInUse(rs_rAX);
@@ -503,41 +537,41 @@ RegLocation X86Mir2Lir::GetReturnAlt() {
/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
- LockTemp(rs_rX86_ARG0);
- LockTemp(rs_rX86_ARG1);
- LockTemp(rs_rX86_ARG2);
- LockTemp(rs_rX86_ARG3);
+ LockTemp(TargetReg32(kArg0));
+ LockTemp(TargetReg32(kArg1));
+ LockTemp(TargetReg32(kArg2));
+ LockTemp(TargetReg32(kArg3));
if (cu_->target64) {
- LockTemp(rs_rX86_ARG4);
- LockTemp(rs_rX86_ARG5);
- LockTemp(rs_rX86_FARG0);
- LockTemp(rs_rX86_FARG1);
- LockTemp(rs_rX86_FARG2);
- LockTemp(rs_rX86_FARG3);
- LockTemp(rs_rX86_FARG4);
- LockTemp(rs_rX86_FARG5);
- LockTemp(rs_rX86_FARG6);
- LockTemp(rs_rX86_FARG7);
+ LockTemp(TargetReg32(kArg4));
+ LockTemp(TargetReg32(kArg5));
+ LockTemp(TargetReg32(kFArg0));
+ LockTemp(TargetReg32(kFArg1));
+ LockTemp(TargetReg32(kFArg2));
+ LockTemp(TargetReg32(kFArg3));
+ LockTemp(TargetReg32(kFArg4));
+ LockTemp(TargetReg32(kFArg5));
+ LockTemp(TargetReg32(kFArg6));
+ LockTemp(TargetReg32(kFArg7));
}
}
/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
- FreeTemp(rs_rX86_ARG0);
- FreeTemp(rs_rX86_ARG1);
- FreeTemp(rs_rX86_ARG2);
- FreeTemp(rs_rX86_ARG3);
+ FreeTemp(TargetReg32(kArg0));
+ FreeTemp(TargetReg32(kArg1));
+ FreeTemp(TargetReg32(kArg2));
+ FreeTemp(TargetReg32(kArg3));
if (cu_->target64) {
- FreeTemp(rs_rX86_ARG4);
- FreeTemp(rs_rX86_ARG5);
- FreeTemp(rs_rX86_FARG0);
- FreeTemp(rs_rX86_FARG1);
- FreeTemp(rs_rX86_FARG2);
- FreeTemp(rs_rX86_FARG3);
- FreeTemp(rs_rX86_FARG4);
- FreeTemp(rs_rX86_FARG5);
- FreeTemp(rs_rX86_FARG6);
- FreeTemp(rs_rX86_FARG7);
+ FreeTemp(TargetReg32(kArg4));
+ FreeTemp(TargetReg32(kArg5));
+ FreeTemp(TargetReg32(kFArg0));
+ FreeTemp(TargetReg32(kFArg1));
+ FreeTemp(TargetReg32(kFArg2));
+ FreeTemp(TargetReg32(kFArg3));
+ FreeTemp(TargetReg32(kFArg4));
+ FreeTemp(TargetReg32(kFArg5));
+ FreeTemp(TargetReg32(kFArg6));
+ FreeTemp(TargetReg32(kFArg7));
}
}
@@ -687,11 +721,14 @@ void X86Mir2Lir::SpillCoreRegs() {
}
// Spill mask not including fake return address register
uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
+ int offset =
+ frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
OpSize size = cu_->target64 ? k64 : k32;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+ StoreBaseDisp(rs_rSP, offset,
+ cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
size, kNotVolatile);
offset += GetInstructionSetPointerSize(cu_->instruction_set);
}
@@ -706,9 +743,10 @@ void X86Mir2Lir::UnSpillCoreRegs() {
uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
OpSize size = cu_->target64 ? k64 : k32;
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadBaseDisp(rs_rX86_SP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
+ LoadBaseDisp(rs_rSP, offset, cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg),
size, kNotVolatile);
offset += GetInstructionSetPointerSize(cu_->instruction_set);
}
@@ -720,11 +758,12 @@ void X86Mir2Lir::SpillFPRegs() {
return;
}
uint32_t mask = fp_spill_mask_;
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ int offset = frame_size_ -
+ (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- StoreBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
- k64, kNotVolatile);
+ StoreBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg), k64, kNotVolatile);
offset += sizeof(double);
}
}
@@ -734,10 +773,12 @@ void X86Mir2Lir::UnSpillFPRegs() {
return;
}
uint32_t mask = fp_spill_mask_;
- int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ int offset = frame_size_ -
+ (GetInstructionSetPointerSize(cu_->instruction_set) * (num_fp_spills_ + num_core_spills_));
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
for (int reg = 0; mask; mask >>= 1, reg++) {
if (mask & 0x1) {
- LoadBaseDisp(rs_rX86_SP, offset, RegStorage::FloatSolo64(reg),
+ LoadBaseDisp(rs_rSP, offset, RegStorage::FloatSolo64(reg),
k64, kNotVolatile);
offset += sizeof(double);
}
@@ -783,49 +824,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
<< " is wrong: expecting " << i << ", seeing "
<< static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
}
- if (cu_->target64) {
- rs_rX86_SP = rs_rX86_SP_64;
-
- rs_rX86_ARG0 = rs_rDI;
- rs_rX86_ARG1 = rs_rSI;
- rs_rX86_ARG2 = rs_rDX;
- rs_rX86_ARG3 = rs_rCX;
- rs_rX86_ARG4 = rs_r8;
- rs_rX86_ARG5 = rs_r9;
- rs_rX86_FARG0 = rs_fr0;
- rs_rX86_FARG1 = rs_fr1;
- rs_rX86_FARG2 = rs_fr2;
- rs_rX86_FARG3 = rs_fr3;
- rs_rX86_FARG4 = rs_fr4;
- rs_rX86_FARG5 = rs_fr5;
- rs_rX86_FARG6 = rs_fr6;
- rs_rX86_FARG7 = rs_fr7;
- rs_rX86_INVOKE_TGT = rs_rDI;
- } else {
- rs_rX86_SP = rs_rX86_SP_32;
-
- rs_rX86_ARG0 = rs_rAX;
- rs_rX86_ARG1 = rs_rCX;
- rs_rX86_ARG2 = rs_rDX;
- rs_rX86_ARG3 = rs_rBX;
- rs_rX86_ARG4 = RegStorage::InvalidReg();
- rs_rX86_ARG5 = RegStorage::InvalidReg();
- rs_rX86_FARG0 = rs_rAX;
- rs_rX86_FARG1 = rs_rCX;
- rs_rX86_FARG2 = rs_rDX;
- rs_rX86_FARG3 = rs_rBX;
- rs_rX86_FARG4 = RegStorage::InvalidReg();
- rs_rX86_FARG5 = RegStorage::InvalidReg();
- rs_rX86_FARG6 = RegStorage::InvalidReg();
- rs_rX86_FARG7 = RegStorage::InvalidReg();
- rs_rX86_INVOKE_TGT = rs_rAX;
- // TODO(64): Initialize with invalid reg
-// rX86_ARG4 = RegStorage::InvalidReg();
-// rX86_ARG5 = RegStorage::InvalidReg();
- }
- rs_rX86_RET0 = rs_rAX;
- rs_rX86_RET1 = rs_rDX;
- rs_rX86_COUNT = rs_rCX;
}
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -875,7 +873,7 @@ void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
(rl_dest.location == kLocCompilerTemp)) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
- int r_base = rs_rX86_SP.GetReg();
+ int r_base = rs_rX86_SP_32.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1327,7 +1325,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
// Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Load32Disp(rs_rX86_SP, displacement, rs_rDI);
+ Load32Disp(rs_rX86_SP_32, displacement, rs_rDI);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
@@ -2264,7 +2262,7 @@ void X86Mir2Lir::GenReduceVector(MIR* mir) {
StoreFinalValue(rl_dest, rl_result);
} else {
int displacement = SRegOffset(rl_result.s_reg_low);
- LIR *l = NewLIR3(extr_opcode, rs_rX86_SP.GetReg(), displacement, vector_src.GetReg());
+ LIR *l = NewLIR3(extr_opcode, rs_rX86_SP_32.GetReg(), displacement, vector_src.GetReg());
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is_wide /* is_64bit */);
AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is_wide /* is_64bit */);
}
@@ -2462,18 +2460,14 @@ RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
return in_to_reg_storage_mapping_.Get(arg_num);
}
-RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
+RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) const {
// For the 32-bit internal ABI, the first 3 arguments are passed in registers.
// Not used for 64-bit, TODO: Move X86_32 to the same framework
switch (core_arg_num) {
- case 0:
- return rs_rX86_ARG1;
- case 1:
- return rs_rX86_ARG2;
- case 2:
- return rs_rX86_ARG3;
- default:
- return RegStorage::InvalidReg();
+ case 0: return TargetReg32(kArg1);
+ case 1: return TargetReg32(kArg2);
+ case 2: return TargetReg32(kArg3);
+ default: return RegStorage::InvalidReg();
}
}
@@ -2503,7 +2497,8 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
+ const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
+ StoreRefDisp(rs_rSP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
}
if (mir_graph_->GetNumOfInVRs() == 0) {
@@ -2540,9 +2535,9 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
} else {
// Needs flush.
if (t_loc->ref) {
- StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
+ StoreRefDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), reg, kNotVolatile);
} else {
- StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
kNotVolatile);
}
}
@@ -2550,9 +2545,9 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
// If arriving in frame & promoted.
if (t_loc->location == kLocPhysReg) {
if (t_loc->ref) {
- LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
+ LoadRefDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
} else {
- LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
+ LoadBaseDisp(rs_rX86_SP_64, SRegOffset(start_vreg + i), t_loc->reg,
t_loc->wide ? k64 : k32, kNotVolatile);
}
}
@@ -2578,16 +2573,16 @@ int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
uintptr_t direct_method, InvokeType type, bool skip_this) {
if (!cu_->target64) {
return Mir2Lir::GenDalvikArgsNoRange(info,
- call_state, pcrLabel, next_call_insn,
- target_method,
- vtable_idx, direct_code,
- direct_method, type, skip_this);
+ call_state, pcrLabel, next_call_insn,
+ target_method,
+ vtable_idx, direct_code,
+ direct_method, type, skip_this);
}
return GenDalvikArgsRange(info,
- call_state, pcrLabel, next_call_insn,
- target_method,
- vtable_idx, direct_code,
- direct_method, type, skip_this);
+ call_state, pcrLabel, next_call_insn,
+ target_method,
+ vtable_idx, direct_code,
+ direct_method, type, skip_this);
}
/*
@@ -2643,14 +2638,14 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
}
next_arg++;
}
@@ -2705,23 +2700,23 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type2(this, ResourceMask::kDalvikReg);
if (src_is_16b_aligned) {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovA128FP);
} else if (src_is_8b_aligned) {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
- ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovLo128FP);
+ ld2 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset + (bytes_to_move >> 1),
kMovHi128FP);
} else {
- ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP_64, current_src_offset, kMovU128FP);
}
if (dest_is_16b_aligned) {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovA128FP);
} else if (dest_is_8b_aligned) {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
- st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovLo128FP);
+ st2 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset + (bytes_to_move >> 1),
temp, kMovHi128FP);
} else {
- st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
+ st1 = OpMovMemReg(rs_rX86_SP_64, current_dest_offset, temp, kMovU128FP);
}
// TODO If we could keep track of aliasing information for memory accesses that are wider
@@ -2758,8 +2753,8 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
RegStorage temp = TargetReg(kArg3, kNotWide);
// Now load the argument VR and store to the outs.
- Load32Disp(rs_rX86_SP, current_src_offset, temp);
- Store32Disp(rs_rX86_SP, current_dest_offset, temp);
+ Load32Disp(rs_rX86_SP_64, current_src_offset, temp);
+ Store32Disp(rs_rX86_SP_64, current_dest_offset, temp);
}
current_src_offset += bytes_to_move;
@@ -2785,17 +2780,17 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, regWide, k64, kNotVolatile);
}
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, rl_arg.reg, k32, kNotVolatile);
} else {
LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP_64, out_offset, regSingle, k32, kNotVolatile);
}
}
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index cb9a24a336..c1c79caa19 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -230,7 +230,7 @@ LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
// TODO: there are several instances of this check. A utility function perhaps?
// TODO: Similar to Arm's reg < 8 check. Perhaps add attribute checks to RegStorage?
// Use shifts instead of a byte operand if the source can't be byte accessed.
- if (r_src2.GetRegNum() >= rs_rX86_SP.GetRegNum()) {
+ if (r_src2.GetRegNum() >= rs_rX86_SP_32.GetRegNum()) {
NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
@@ -385,7 +385,7 @@ LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int o
}
LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
}
return l;
@@ -411,7 +411,7 @@ LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
LOG(FATAL) << "Bad case in OpMemReg " << op;
break;
}
- LIR *l = NewLIR3(opcode, rs_rX86_SP.GetReg(), displacement, r_value);
+ LIR *l = NewLIR3(opcode, rs_rX86_SP_32.GetReg(), displacement, r_value);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
@@ -437,7 +437,7 @@ LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
LOG(FATAL) << "Bad case in OpRegMem " << op;
break;
}
- LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP.GetReg(), displacement);
+ LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP_32.GetReg(), displacement);
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
}
@@ -514,7 +514,7 @@ LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int
r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
} else if (op == kOpAdd) { // lea add special case
return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
- r_src.GetReg() /* base */, rs_rX86_SP.GetReg()/*r4sib_no_index*/ /* index */,
+ r_src.GetReg() /* base */, rs_rX86_SP_32.GetReg()/*r4sib_no_index*/ /* index */,
0 /* scale */, value /* disp */);
}
OpRegCopy(r_dest, r_src);
@@ -705,7 +705,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
}
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
true /* is_load */, is64bit);
if (pair) {
@@ -870,7 +870,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
}
if (mem_ref_type_ == ResourceMask::kDalvikReg) {
- DCHECK(r_base == rs_rX86_SP);
+ DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
false /* is_load */, is64bit);
if (pair) {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index afdc244dac..76a67c4d6c 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -234,7 +234,7 @@ constexpr RegStorage rs_r3q(RegStorage::kValid | r3q);
constexpr RegStorage rs_rBX = rs_r3;
constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
-extern RegStorage rs_rX86_SP;
+static_assert(rs_rX86_SP_64.GetRegNum() == rs_rX86_SP_32.GetRegNum(), "Unexpected mismatch");
constexpr RegStorage rs_r5(RegStorage::kValid | r5);
constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
constexpr RegStorage rs_rBP = rs_r5;
@@ -313,24 +313,8 @@ constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
-extern RegStorage rs_rX86_ARG0;
-extern RegStorage rs_rX86_ARG1;
-extern RegStorage rs_rX86_ARG2;
-extern RegStorage rs_rX86_ARG3;
-extern RegStorage rs_rX86_ARG4;
-extern RegStorage rs_rX86_ARG5;
-extern RegStorage rs_rX86_FARG0;
-extern RegStorage rs_rX86_FARG1;
-extern RegStorage rs_rX86_FARG2;
-extern RegStorage rs_rX86_FARG3;
-extern RegStorage rs_rX86_FARG4;
-extern RegStorage rs_rX86_FARG5;
-extern RegStorage rs_rX86_FARG6;
-extern RegStorage rs_rX86_FARG7;
-extern RegStorage rs_rX86_RET0;
-extern RegStorage rs_rX86_RET1;
-extern RegStorage rs_rX86_INVOKE_TGT;
-extern RegStorage rs_rX86_COUNT;
+constexpr RegStorage rs_rX86_RET0 = rs_rAX;
+constexpr RegStorage rs_rX86_RET1 = rs_rDX;
// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation x86_loc_c_return
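
The static_assert above captures the invariant behind the rs_rX86_SP_32 / rs_rX86_SP_64 split used throughout the earlier hunks: both handles must name the same physical register, and call sites pick the width explicitly (typically via cu_->target64) instead of going through a mutable rs_rX86_SP alias. A minimal sketch of that idea, assuming an illustrative RegHandle type rather than ART's real RegStorage encoding:

    // Sketch only: one physical register number, two width-specific views.
    #include <cassert>
    #include <cstdint>

    class RegHandle {
     public:
      constexpr RegHandle(uint8_t num, bool is64) : num_(num), is64_(is64) {}
      constexpr uint8_t GetRegNum() const { return num_; }
      constexpr bool Is64Bit() const { return is64_; }
     private:
      uint8_t num_;
      bool is64_;
    };

    // Stand-ins for rs_rX86_SP_32 / rs_rX86_SP_64: register number 4 is ESP/RSP.
    constexpr RegHandle kSp32(4, /*is64=*/false);
    constexpr RegHandle kSp64(4, /*is64=*/true);
    static_assert(kSp32.GetRegNum() == kSp64.GetRegNum(), "Unexpected mismatch");

    int main() {
      // A 64-bit target addresses the frame through the 64-bit view, a 32-bit
      // target through the 32-bit one; the underlying register is the same.
      bool target64 = true;
      RegHandle sp = target64 ? kSp64 : kSp32;
      assert(sp.GetRegNum() == 4);
      (void)sp;
      return 0;
    }
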
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 4a84ff2516..46ed011b53 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -339,6 +339,9 @@ class RegStorage : public ValueObject {
private:
uint16_t reg_;
};
+static inline std::ostream& operator<<(std::ostream& o, const RegStorage& rhs) {
+ return o << rhs.GetRawBits(); // TODO: better output.
+}
} // namespace art
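
This streaming operator is what lets the DCHECK_EQ forms in utility_x86.cc above report both RegStorage operands when a check fails. A self-contained sketch of the pattern, assuming a simplified Reg type and a toy CheckEq helper standing in for the real macro (all names here are illustrative):

    #include <cstdint>
    #include <cstdlib>
    #include <iostream>
    #include <sstream>

    class Reg {
     public:
      explicit constexpr Reg(uint16_t bits) : bits_(bits) {}
      constexpr uint16_t GetRawBits() const { return bits_; }
      friend constexpr bool operator==(Reg a, Reg b) { return a.bits_ == b.bits_; }
     private:
      uint16_t bits_;
    };

    // Stream insertion so an equality check can print both operands on failure.
    static inline std::ostream& operator<<(std::ostream& os, const Reg& r) {
      return os << "Reg(0x" << std::hex << r.GetRawBits() << std::dec << ")";
    }

    // Toy stand-in for DCHECK_EQ: formats both values via operator<< on mismatch.
    template <typename T>
    void CheckEq(const T& lhs, const T& rhs, const char* expr) {
      if (!(lhs == rhs)) {
        std::ostringstream msg;
        msg << "Check failed: " << expr << " (" << lhs << " vs " << rhs << ")";
        std::cerr << msg.str() << std::endl;
        std::abort();
      }
    }

    int main() {
      CheckEq(Reg(0x0004), Reg(0x0004), "sp32 == sp32");  // passes silently
      // CheckEq(Reg(0x0004), Reg(0x1004), "sp32 == sp64");
      // would print: Check failed: sp32 == sp64 (Reg(0x4) vs Reg(0x1004)) and abort.
      return 0;
    }
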
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 448888f02d..cf2cddb896 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -103,13 +103,13 @@ bool ImageWriter::Write(const std::string& image_filename,
std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
if (oat_file.get() == NULL) {
- LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
+ PLOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
return false;
}
std::string error_msg;
oat_file_ = OatFile::OpenReadable(oat_file.get(), oat_location, &error_msg);
if (oat_file_ == nullptr) {
- LOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
+ PLOG(ERROR) << "Failed to open writable oat file " << oat_filename << " for " << oat_location
<< ": " << error_msg;
return false;
}
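
The switch from LOG(ERROR) to PLOG(ERROR) appends the strerror(errno) text for the failed open to the message, which is usually the detail that matters when an oat file cannot be opened. A hedged sketch of the distinction, using a plain helper function instead of the real logging macros:

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <iostream>
    #include <string>

    // PLOG-style helper: append the errno description to the message.
    static void PlogError(const std::string& message) {
      int saved_errno = errno;  // Capture before any other call can clobber it.
      std::cerr << message << ": " << std::strerror(saved_errno) << std::endl;
    }

    int main() {
      std::FILE* f = std::fopen("/no/such/dir/boot.oat", "r+b");
      if (f == nullptr) {
        // Prints e.g. "Failed to open oat file ...: No such file or directory".
        PlogError("Failed to open oat file /no/such/dir/boot.oat for boot.oat");
      } else {
        std::fclose(f);
      }
      return 0;
    }
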
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8baf3c5f89..8418ab0a7e 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -64,10 +64,6 @@ class Temporaries : public ValueObject {
size_t index_;
};
-static bool IsTypeSupported(Primitive::Type type) {
- return type != Primitive::kPrimFloat && type != Primitive::kPrimDouble;
-}
-
void HGraphBuilder::InitializeLocals(uint16_t count) {
graph_->SetNumberOfVRegs(count);
locals_.SetSize(count);
@@ -78,10 +74,10 @@ void HGraphBuilder::InitializeLocals(uint16_t count) {
}
}
-bool HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
+void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
// dex_compilation_unit_ is null only when unit testing.
if (dex_compilation_unit_ == nullptr) {
- return true;
+ return;
}
graph_->SetNumberOfInVRegs(number_of_parameters);
@@ -116,7 +112,6 @@ bool HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
parameter_index++;
}
}
- return true;
}
template<typename T>
@@ -195,9 +190,7 @@ HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
}
}
- if (!InitializeParameters(code_item.ins_size_)) {
- return nullptr;
- }
+ InitializeParameters(code_item.ins_size_);
size_t dex_offset = 0;
while (code_ptr < code_end) {
@@ -464,9 +457,6 @@ bool HGraphBuilder::BuildInstanceFieldAccess(const Instruction& instruction,
}
Primitive::Type field_type = resolved_field->GetTypeAsPrimitiveType();
- if (!IsTypeSupported(field_type)) {
- return false;
- }
HInstruction* object = LoadLocal(obj_reg, Primitive::kPrimNot);
current_block_->AddInstruction(new (arena_) HNullCheck(object, dex_offset));
@@ -524,10 +514,6 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
return false;
}
- if (!IsTypeSupported(field_type)) {
- return false;
- }
-
HLoadClass* constant = new (arena_) HLoadClass(
storage_index, is_referrers_class, dex_offset);
current_block_->AddInstruction(constant);
@@ -582,8 +568,6 @@ void HGraphBuilder::BuildArrayAccess(const Instruction& instruction,
uint8_t array_reg = instruction.VRegB_23x();
uint8_t index_reg = instruction.VRegC_23x();
- DCHECK(IsTypeSupported(anticipated_type));
-
// We need one temporary for the null check, one for the index, and one for the length.
Temporaries temps(graph_, 3);
@@ -799,8 +783,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
- // TODO: these instructions are also used to move floating point values, so what is
- // the type (int or float)?
+ // Note that the SSA building will refine the types.
case Instruction::MOVE:
case Instruction::MOVE_FROM16:
case Instruction::MOVE_16: {
@@ -809,8 +792,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
- // TODO: these instructions are also used to move floating point values, so what is
- // the type (long or double)?
+ // Note that the SSA building will refine the types.
case Instruction::MOVE_WIDE:
case Instruction::MOVE_WIDE_FROM16:
case Instruction::MOVE_WIDE_16: {
@@ -884,7 +866,8 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
instruction.GetVarArgs(args);
- if (!BuildInvoke(instruction, dex_offset, method_idx, number_of_vreg_arguments, false, args, -1)) {
+ if (!BuildInvoke(instruction, dex_offset, method_idx,
+ number_of_vreg_arguments, false, args, -1)) {
return false;
}
break;
@@ -1286,7 +1269,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
return false;
}
current_block_->AddInstruction(
- new (arena_) HLoadClass(instruction.VRegB_21c(), is_referrers_class, dex_offset));
+ new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
@@ -1308,11 +1291,34 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::INSTANCE_OF: {
+ uint16_t type_index = instruction.VRegC_22c();
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ HInstruction* object = LoadLocal(instruction.VRegB_22c(), Primitive::kPrimNot);
+ HLoadClass* cls = new (arena_) HLoadClass(type_index, is_referrers_class, dex_offset);
+ current_block_->AddInstruction(cls);
+ // The class needs a temporary before being used by the type check.
+ Temporaries temps(graph_, 1);
+ temps.Add(cls);
+ current_block_->AddInstruction(
+ new (arena_) HTypeCheck(object, cls, type_known_final, dex_offset));
+ UpdateLocal(instruction.VRegA_22c(), current_block_->GetLastInstruction());
+ break;
+ }
+
default:
return false;
}
return true;
-}
+} // NOLINT(readability/fn_size)
HIntConstant* HGraphBuilder::GetIntConstant0() {
if (constant0_ != nullptr) {
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 030f45b609..09c9a51260 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -93,10 +93,7 @@ class HGraphBuilder : public ValueObject {
void UpdateLocal(int register_index, HInstruction* instruction) const;
HInstruction* LoadLocal(int register_index, Primitive::Type type) const;
void PotentiallyAddSuspendCheck(int32_t target_offset, uint32_t dex_offset);
-
- // Temporarily returns whether the compiler supports the parameters
- // of the method.
- bool InitializeParameters(uint16_t number_of_parameters);
+ void InitializeParameters(uint16_t number_of_parameters);
template<typename T>
void Unop_12x(const Instruction& instruction, Primitive::Type type);
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c965489291..9d172638e1 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -285,16 +285,22 @@ void CodeGenerator::InitLocations(HInstruction* instruction) {
HInstruction* previous = instruction->GetPrevious();
Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
Move(previous, temp_location, instruction);
- previous->GetLocations()->SetOut(temp_location);
}
return;
}
AllocateRegistersLocally(instruction);
for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
Location location = instruction->GetLocations()->InAt(i);
+ HInstruction* input = instruction->InputAt(i);
if (location.IsValid()) {
// Move the input to the desired location.
- Move(instruction->InputAt(i), location, instruction);
+ if (input->GetNext()->IsTemporary()) {
+ // If the input was stored in a temporary, use that temporary to
+ // perform the move.
+ Move(input->GetNext(), location, instruction);
+ } else {
+ Move(input, location, instruction);
+ }
}
}
}
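
The branch added above makes InitLocations read an input through its HTemporary whenever one immediately follows the producing instruction, instead of writing the temporary location back into the producer's LocationSummary. A simplified model of that lookup rule (illustrative types, not the ART HInstruction hierarchy):

    #include <iostream>
    #include <string>

    struct Instr {
      std::string name;
      bool is_temporary;
      Instr* next;
    };

    // Pick the instruction whose location feeds the move: the producer itself,
    // or the temporary that immediately follows it.
    const Instr* MoveSourceFor(const Instr* input) {
      return (input->next != nullptr && input->next->is_temporary) ? input->next : input;
    }

    int main() {
      Instr temp{"Temporary", /*is_temporary=*/true, nullptr};
      Instr load{"LoadClass", /*is_temporary=*/false, &temp};
      std::cout << MoveSourceFor(&load)->name << std::endl;  // prints "Temporary"
      return 0;
    }
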
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 8c54cdfcf3..6218fc973a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -212,8 +212,9 @@ class LoadClassSlowPathARM : public SlowPathCodeARM {
arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
// Move the class to the desired location.
- if (locations->Out().IsValid()) {
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
}
codegen->RestoreLiveRegisters(locations);
@@ -266,6 +267,49 @@ class LoadStringSlowPathARM : public SlowPathCodeARM {
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
+class TypeCheckSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit TypeCheckSlowPathARM(HTypeCheck* instruction, Location object_class)
+ : instruction_(instruction),
+ object_class_(object_class) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ MoveOperands move1(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ nullptr);
+ MoveOperands move2(object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ nullptr);
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ arm_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ arm_codegen->InvokeRuntime(
+ QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, instruction_->GetDexPc());
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ HTypeCheck* const instruction_;
+ const Location object_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
+};
+
#undef __
@@ -766,6 +810,9 @@ void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstr
default:
LOG(FATAL) << "Unexpected type " << instruction->GetType();
}
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ Move32(location, temp_location);
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -1861,10 +1908,18 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instr
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister value = locations->InAt(1).As<SRegister>();
+ __ StoreSToOffset(value, obj, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
+ __ StoreDToOffset(value, obj, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -1922,10 +1977,18 @@ void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instr
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister out = locations->Out().As<SRegister>();
+ __ LoadSFromOffset(out, obj, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
+ __ LoadDFromOffset(out, obj, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2459,10 +2522,18 @@ void InstructionCodeGeneratorARM::VisitStaticFieldGet(HStaticFieldGet* instructi
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister out = locations->Out().As<SRegister>();
+ __ LoadSFromOffset(out, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister out = FromLowSToD(locations->Out().AsFpuRegisterPairLow<SRegister>());
+ __ LoadDFromOffset(out, cls, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2521,10 +2592,18 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ SRegister value = locations->InAt(1).As<SRegister>();
+ __ StoreSToOffset(value, cls, offset);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ DRegister value = FromLowSToD(locations->InAt(1).AsFpuRegisterPairLow<SRegister>());
+ __ StoreDToOffset(value, cls, offset);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -2577,5 +2656,54 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) {
QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc());
}
+void LocationsBuilderARM::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Register cls = locations->InAt(1).As<Register>();
+ Register out = locations->Out().As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeARM* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ cmp(obj, ShifterOperand(0));
+ __ b(&zero, EQ);
+ // Compare the class of `obj` with `cls`.
+ __ LoadFromOffset(kLoadWord, out, obj, class_offset);
+ __ cmp(out, ShifterOperand(cls));
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ b(&zero, NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(
+ instruction, Location::RegisterLocation(out));
+ codegen_->AddSlowPath(slow_path);
+ __ b(slow_path->GetEntryLabel(), NE);
+ __ LoadImmediate(out, 1);
+ __ b(&done);
+ }
+ __ Bind(&zero);
+ __ LoadImmediate(out, 0);
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
} // namespace arm
} // namespace art
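
The VisitTypeCheck code above, like its x86 and x86-64 counterparts further down, emits this control flow: a null object yields 0, an exact class match yields 1, and only a mismatch on a non-final class falls into the pInstanceofNonTrivial slow path. A rough, compilable rendering of those semantics, with a plain superclass walk standing in for the runtime entrypoint (interfaces and arrays are left out of this sketch):

    #include <cassert>

    struct Class { const Class* super; };
    struct Object { const Class* klass; };

    // Stand-in for the pInstanceofNonTrivial entrypoint: walk the superclass
    // chain (interfaces and arrays are ignored here).
    static bool InstanceofNonTrivial(const Class* obj_class, const Class* cls) {
      for (const Class* k = obj_class; k != nullptr; k = k->super) {
        if (k == cls) return true;
      }
      return false;
    }

    static bool IsInstanceOf(const Object* obj, const Class* cls, bool class_is_final) {
      if (obj == nullptr) return false;    // "Return 0 if `obj` is null."
      if (obj->klass == cls) return true;  // Fast path: the classes are equal.
      // A final class has no subclasses, so inequality is definitive;
      // otherwise take the slow path.
      return class_is_final ? false : InstanceofNonTrivial(obj->klass, cls);
    }

    int main() {
      Class object_class{nullptr};
      Class string_class{&object_class};
      Object s{&string_class};
      assert(IsInstanceOf(&s, &string_class, /*class_is_final=*/true));
      assert(IsInstanceOf(&s, &object_class, /*class_is_final=*/false));
      assert(!IsInstanceOf(nullptr, &string_class, /*class_is_final=*/true));
      return 0;
    }
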
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 667b075c7e..f9cf7d87af 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -413,7 +413,9 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
__ Mov(temp, value);
__ Str(temp, StackOperandFrom(location));
}
-
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ MoveHelper(location, temp_location, type);
} else if (instruction->IsLoadLocal()) {
uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
switch (type) {
@@ -549,6 +551,7 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
M(StaticFieldGet) \
M(StaticFieldSet) \
M(Throw) \
+ M(TypeCheck) \
M(TypeConversion) \
#define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
@@ -583,7 +586,7 @@ void LocationsBuilderARM64::HandleAddSub(HBinaryOperation* instr) {
case Primitive::kPrimLong: {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
}
case Primitive::kPrimBoolean:
@@ -637,7 +640,7 @@ void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
@@ -650,7 +653,7 @@ void LocationsBuilderARM64::VisitCompare(HCompare* instruction) {
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM64::VisitCompare(HCompare* instruction) {
@@ -680,7 +683,7 @@ void LocationsBuilderARM64::VisitCondition(HCondition* instruction) {
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
if (instruction->NeedsMaterialization()) {
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
@@ -786,7 +789,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
@@ -1004,7 +1007,7 @@ void LocationsBuilderARM64::VisitMul(HMul* mul) {
case Primitive::kPrimLong:
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
break;
case Primitive::kPrimFloat:
@@ -1060,7 +1063,7 @@ void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction)
void LocationsBuilderARM64::VisitNot(HNot* instruction) {
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 91aa26902d..82591b0ebf 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -241,10 +241,12 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
codegen->RecordPcInfo(at_, dex_pc_);
// Move the class to the desired location.
- if (locations->Out().IsValid()) {
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ Location out = locations->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
+
codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
@@ -266,6 +268,49 @@ class LoadClassSlowPathX86 : public SlowPathCodeX86 {
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
};
+class TypeCheckSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ TypeCheckSlowPathX86(HTypeCheck* instruction, Location object_class)
+ : instruction_(instruction),
+ object_class_(object_class) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ MoveOperands move1(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ nullptr);
+ MoveOperands move2(object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ nullptr);
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ x86_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInstanceofNonTrivial)));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ codegen->RestoreLiveRegisters(locations);
+
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HTypeCheck* const instruction_;
+ const Location object_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
+};
+
#undef __
#define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
@@ -623,6 +668,9 @@ void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstr
DCHECK(location.IsConstant());
DCHECK_EQ(location.GetConstant(), instruction);
}
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ Move32(location, temp_location);
} else if (instruction->IsLoadLocal()) {
int slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
switch (instruction->GetType()) {
@@ -1879,10 +1927,18 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instr
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(obj, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(obj, offset), value);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -1952,10 +2008,18 @@ void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instr
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(obj, offset));
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2543,10 +2607,18 @@ void InstructionCodeGeneratorX86::VisitStaticFieldGet(HStaticFieldGet* instructi
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(cls, offset));
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2618,10 +2690,18 @@ void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instructi
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(cls, offset), value);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -2671,5 +2751,60 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
+void LocationsBuilderX86::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ Register obj = locations->InAt(0).As<Register>();
+ Location cls = locations->InAt(1);
+ Register out = locations->Out().As<Register>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeX86* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ __ movl(out, Address(obj, class_offset));
+ // Compare the class of `obj` with `cls`.
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.As<Register>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(ESP, cls.GetStackIndex()));
+ }
+
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86(
+ instruction, Location::RegisterLocation(out));
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ }
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
} // namespace x86
} // namespace art
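
The TypeCheck slow paths route their two argument moves through HParallelMove because either source may already sit in one of the calling-convention registers being written; copying sequentially could clobber a value before it is read. A minimal sketch of why the moves must be treated as parallel, using plain ints rather than the ART move resolver:

    #include <cassert>

    struct Regs { int arg0; int arg1; };

    // Move {*a -> arg0, *b -> arg1} where a or b may alias the destinations.
    void ParallelMove(Regs* regs, const int* a, const int* b) {
      int tmp_a = *a;  // Read every source before writing any destination.
      int tmp_b = *b;
      regs->arg0 = tmp_a;
      regs->arg1 = tmp_b;
    }

    int main() {
      Regs regs{/*arg0=*/7, /*arg1=*/9};
      // The sources alias the destinations in swapped order; a sequential
      // copy (arg0 = arg1; arg1 = arg0;) would lose the value 7.
      ParallelMove(&regs, &regs.arg1, &regs.arg0);
      assert(regs.arg0 == 9 && regs.arg1 == 7);
      (void)regs;
      return 0;
    }
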
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1130b4c692..d2730a5b37 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -224,10 +224,11 @@ class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
: QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)) , true));
codegen->RecordPcInfo(at_, dex_pc_);
+ Location out = locations->Out();
// Move the class to the desired location.
- if (locations->Out().IsValid()) {
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ x64_codegen->Move(out, Location::RegisterLocation(RAX));
}
codegen->RestoreLiveRegisters(locations);
@@ -281,6 +282,50 @@ class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
};
+class TypeCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ TypeCheckSlowPathX86_64(HTypeCheck* instruction, Location object_class)
+ : instruction_(instruction),
+ object_class_(object_class) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ // We're moving two locations to locations that could overlap, so we need a parallel
+ // move resolver.
+ InvokeRuntimeCallingConvention calling_convention;
+ MoveOperands move1(locations->InAt(1),
+ Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
+ nullptr);
+ MoveOperands move2(object_class_,
+ Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
+ nullptr);
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+ parallel_move.AddMove(&move1);
+ parallel_move.AddMove(&move2);
+ x64_codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInstanceofNonTrivial), true));
+ codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HTypeCheck* const instruction_;
+ const Location object_class_;
+
+ DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
+};
+
#undef __
#define __ reinterpret_cast<X86_64Assembler*>(GetAssembler())->
@@ -559,6 +604,9 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
default:
LOG(FATAL) << "Unexpected local type " << instruction->GetType();
}
+ } else if (instruction->IsTemporary()) {
+ Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
+ Move(location, temp_location);
} else {
DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
switch (instruction->GetType()) {
@@ -1727,25 +1775,27 @@ void LocationsBuilderX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instructio
void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movb(Address(obj, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movw(Address(obj, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movl(Address(obj, offset), value);
if (field_type == Primitive::kPrimNot) {
CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
@@ -1756,14 +1806,23 @@ void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* in
}
case Primitive::kPrimLong: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movq(Address(obj, offset), value);
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(obj, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(obj, offset), value);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -1780,45 +1839,58 @@ void LocationsBuilderX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instructio
void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister obj = locations->InAt(0).As<CpuRegister>();
- CpuRegister out = locations->Out().As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movzxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimByte: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movsxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimShort: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movsxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimChar: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movzxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movl(out, Address(obj, offset));
break;
}
case Primitive::kPrimLong: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movq(out, Address(obj, offset));
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(obj, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(obj, offset));
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2495,45 +2567,58 @@ void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
void InstructionCodeGeneratorX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister cls = locations->InAt(0).As<CpuRegister>();
- CpuRegister out = locations->Out().As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movzxb(out, Address(cls, offset));
break;
}
case Primitive::kPrimByte: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movsxb(out, Address(cls, offset));
break;
}
case Primitive::kPrimShort: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movsxw(out, Address(cls, offset));
break;
}
case Primitive::kPrimChar: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movzxw(out, Address(cls, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movl(out, Address(cls, offset));
break;
}
case Primitive::kPrimLong: {
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movq(out, Address(cls, offset));
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << instruction->GetType();
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movss(out, Address(cls, offset));
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister out = locations->Out().As<XmmRegister>();
+ __ movsd(out, Address(cls, offset));
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << instruction->GetType();
UNREACHABLE();
@@ -2557,25 +2642,27 @@ void LocationsBuilderX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
CpuRegister cls = locations->InAt(0).As<CpuRegister>();
- CpuRegister value = locations->InAt(1).As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movb(Address(cls, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movw(Address(cls, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movl(Address(cls, offset), value);
if (field_type == Primitive::kPrimNot) {
CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
@@ -2586,14 +2673,23 @@ void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instru
}
case Primitive::kPrimLong: {
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
__ movq(Address(cls, offset), value);
break;
}
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble:
- LOG(FATAL) << "Unimplemented register type " << field_type;
- UNREACHABLE();
+ case Primitive::kPrimFloat: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movss(Address(cls, offset), value);
+ break;
+ }
+
+ case Primitive::kPrimDouble: {
+ XmmRegister value = locations->InAt(1).As<XmmRegister>();
+ __ movsd(Address(cls, offset), value);
+ break;
+ }
+
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << field_type;
UNREACHABLE();
@@ -2645,5 +2741,59 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) {
codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}
+void LocationsBuilderX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary::CallKind call_kind = instruction->IsClassFinal()
+ ? LocationSummary::kNoCall
+ : LocationSummary::kCallOnSlowPath;
+ LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::Any());
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitTypeCheck(HTypeCheck* instruction) {
+ LocationSummary* locations = instruction->GetLocations();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ Location cls = locations->InAt(1);
+ CpuRegister out = locations->Out().As<CpuRegister>();
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ Label done, zero;
+ SlowPathCodeX86_64* slow_path = nullptr;
+
+ // Return 0 if `obj` is null.
+ // TODO: avoid this check if we know obj is not null.
+ __ testl(obj, obj);
+ __ j(kEqual, &zero);
+ // Compare the class of `obj` with `cls`.
+ __ movl(out, Address(obj, class_offset));
+ if (cls.IsRegister()) {
+ __ cmpl(out, cls.As<CpuRegister>());
+ } else {
+ DCHECK(cls.IsStackSlot()) << cls;
+ __ cmpl(out, Address(CpuRegister(RSP), cls.GetStackIndex()));
+ }
+ if (instruction->IsClassFinal()) {
+ // Classes must be equal for the instanceof to succeed.
+ __ j(kNotEqual, &zero);
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ } else {
+ // If the classes are not equal, we go into a slow path.
+ DCHECK(locations->OnlyCallsOnSlowPath());
+ slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathX86_64(
+ instruction, Location::RegisterLocation(out.AsRegister()));
+ codegen_->AddSlowPath(slow_path);
+ __ j(kNotEqual, slow_path->GetEntryLabel());
+ __ movl(out, Immediate(1));
+ __ jmp(&done);
+ }
+ __ Bind(&zero);
+ __ movl(out, Immediate(0));
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+ __ Bind(&done);
+}
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 743ffc46bf..1953241a2a 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -53,7 +53,7 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
<< " lists " << block_count_in_p_successors
<< " occurrences of block " << block->GetBlockId()
<< " in its successors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -83,7 +83,7 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
<< " lists " << block_count_in_s_predecessors
<< " occurrences of block " << block->GetBlockId()
<< " in its predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -93,7 +93,7 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
std::stringstream error;
error << "Block " << block->GetBlockId()
<< " does not end with a branch instruction.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Visit this block's list of phis.
@@ -103,7 +103,7 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
std::stringstream error;
error << "Block " << current_block_->GetBlockId()
<< " has a non-phi in its phi list.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
it.Current()->Accept(this);
}
@@ -116,7 +116,7 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
std::stringstream error;
error << "Block " << current_block_->GetBlockId()
<< " has a phi in its non-phi list.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
it.Current()->Accept(this);
}
@@ -139,7 +139,7 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) {
} else {
error << " not associated with any block.";
}
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure the inputs of `instruction` are defined in a block of the graph.
@@ -154,7 +154,7 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) {
error << "Input " << input->GetId()
<< " of instruction " << instruction->GetId()
<< " is not defined in a basic block of the control-flow graph.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -170,7 +170,7 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) {
error << "User " << use->GetId()
<< " of instruction " << instruction->GetId()
<< " is not defined in a basic block of the control-flow graph.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -188,7 +188,7 @@ void SSAChecker::VisitBasicBlock(HBasicBlock* block) {
std::stringstream error;
error << "Critical edge between blocks " << block->GetBlockId()
<< " and " << successor->GetBlockId() << ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -207,7 +207,7 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
std::stringstream error;
error << "Loop pre-header is not the first predecessor of the loop header "
<< id << ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure the loop header has only two predecessors and that only the
@@ -215,25 +215,25 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
if (loop_header->GetPredecessors().Size() < 2) {
std::stringstream error;
error << "Loop header " << id << " has less than two predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
} else if (loop_header->GetPredecessors().Size() > 2) {
std::stringstream error;
error << "Loop header " << id << " has more than two predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
} else {
HLoopInformation* loop_information = loop_header->GetLoopInformation();
HBasicBlock* first_predecessor = loop_header->GetPredecessors().Get(0);
if (loop_information->IsBackEdge(first_predecessor)) {
std::stringstream error;
error << "First predecessor of loop header " << id << " is a back edge.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
HBasicBlock* second_predecessor = loop_header->GetPredecessors().Get(1);
if (!loop_information->IsBackEdge(second_predecessor)) {
std::stringstream error;
error << "Second predecessor of loop header " << id
<< " is not a back edge.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -244,7 +244,7 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
std::stringstream error;
error << "Loop defined by header " << id << " has "
<< num_back_edges << " back edge(s).";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure all blocks in the loop are dominated by the loop header.
@@ -256,7 +256,7 @@ void SSAChecker::CheckLoop(HBasicBlock* loop_header) {
std::stringstream error;
error << "Loop block " << loop_block->GetBlockId()
<< " not dominated by loop header " << id;
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -274,7 +274,7 @@ void SSAChecker::VisitInstruction(HInstruction* instruction) {
<< " in block " << current_block_->GetBlockId()
<< " does not dominate use " << use->GetId()
<< " in block " << use->GetBlock()->GetBlockId() << ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
@@ -292,7 +292,7 @@ void SSAChecker::VisitInstruction(HInstruction* instruction) {
<< " from block " << current_block_->GetBlockId()
<< " does not dominate instruction " << instruction->GetId()
<< ".";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
@@ -307,7 +307,7 @@ void SSAChecker::VisitPhi(HPhi* phi) {
error << "Loop phi " << phi->GetId()
<< " in block " << phi->GetBlock()->GetBlockId()
<< " is its own first input.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
// Ensure the number of phi inputs is the same as the number of
@@ -321,7 +321,7 @@ void SSAChecker::VisitPhi(HPhi* phi) {
<< " has " << phi->InputCount() << " inputs, but block "
<< phi->GetBlock()->GetBlockId() << " has "
<< predecessors.Size() << " predecessors.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
} else {
// Ensure phi input at index I either comes from the Ith
// predecessor or from a block that dominates this predecessor.
@@ -336,7 +336,7 @@ void SSAChecker::VisitPhi(HPhi* phi) {
<< " from block " << phi->GetBlock()->GetBlockId()
<< " is not defined in predecessor number " << i
<< " nor in a block dominating it.";
- errors_.Insert(error.str());
+ errors_.push_back(error.str());
}
}
}
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index badf21d946..8ba8cb16b1 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -30,7 +30,6 @@ class GraphChecker : public HGraphVisitor {
const char* dump_prefix = "art::GraphChecker: ")
: HGraphVisitor(graph),
allocator_(allocator),
- errors_(allocator, 0),
dump_prefix_(dump_prefix) {}
// Check the whole graph (in insertion order).
@@ -44,18 +43,18 @@ class GraphChecker : public HGraphVisitor {
// Was the last visit of the graph valid?
bool IsValid() const {
- return errors_.IsEmpty();
+ return errors_.empty();
}
// Get the list of detected errors.
- const GrowableArray<std::string>& GetErrors() const {
+ const std::vector<std::string>& GetErrors() const {
return errors_;
}
// Print detected errors on output stream `os`.
void Dump(std::ostream& os) const {
- for (size_t i = 0, e = errors_.Size(); i < e; ++i) {
- os << dump_prefix_ << errors_.Get(i) << std::endl;
+ for (size_t i = 0, e = errors_.size(); i < e; ++i) {
+ os << dump_prefix_ << errors_[i] << std::endl;
}
}
@@ -64,7 +63,7 @@ class GraphChecker : public HGraphVisitor {
// The block currently visited.
HBasicBlock* current_block_ = nullptr;
// Errors encountered while checking the graph.
- GrowableArray<std::string> errors_;
+ std::vector<std::string> errors_;
private:
// String displayed before dumped errors.
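
With GrowableArray gone, the checker accumulates its diagnostics in a std::vector<std::string> and replays them in Dump. A small self-contained sketch of that collect-then-dump pattern (simplified, not the actual GraphChecker interface):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    class Checker {
     public:
      void AddError(const std::string& message) { errors_.push_back(message); }
      bool IsValid() const { return errors_.empty(); }
      void Dump(std::ostream& os, const char* prefix = "art::GraphChecker: ") const {
        for (const std::string& error : errors_) {
          os << prefix << error << std::endl;
        }
      }
     private:
      std::vector<std::string> errors_;
    };

    int main() {
      Checker checker;
      std::stringstream error;
      error << "Block " << 3 << " does not end with a branch instruction.";
      checker.AddError(error.str());
      if (!checker.IsValid()) {
        checker.Dump(std::cerr);
      }
      return 0;
    }
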
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index bed688b5e3..d1555d4e11 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -417,6 +417,7 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
LocationSummary(HInstruction* instruction, CallKind call_kind = kNoCall);
void SetInAt(uint32_t at, Location location) {
+ DCHECK(inputs_.Get(at).IsUnallocated() || inputs_.Get(at).IsInvalid());
inputs_.Put(at, location);
}
@@ -429,8 +430,17 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
}
void SetOut(Location location, bool overlaps = true) {
+ DCHECK(output_.IsUnallocated() || output_.IsInvalid());
output_overlaps_ = overlaps;
- output_ = Location(location);
+ output_ = location;
+ }
+
+ void UpdateOut(Location location) {
+ // The only reason for updating an output is for parameters where
+ // we only know the exact stack slot after doing full register
+ // allocation.
+ DCHECK(output_.IsStackSlot() || output_.IsDoubleStackSlot());
+ output_ = location;
}
void AddTemp(Location location) {
@@ -442,6 +452,7 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
}
void SetTempAt(uint32_t at, Location location) {
+ DCHECK(temps_.Get(at).IsUnallocated() || temps_.Get(at).IsInvalid());
temps_.Put(at, location);
}
@@ -528,6 +539,8 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
// Registers that are in use at this position.
RegisterSet live_registers_;
+ ART_FRIEND_TEST(RegisterAllocatorTest, ExpectedInRegisterHint);
+ ART_FRIEND_TEST(RegisterAllocatorTest, SameAsFirstInputHint);
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
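
SetOut is now limited to initializing an output that has not been allocated yet, while UpdateOut only refines an output that is already a stack slot, which is what the register allocator does further down when it folds the frame size into spill locations. A compilable sketch of that split, assuming a simplified Location type (names are illustrative):

    #include <cassert>

    enum class Kind { kInvalid, kUnallocated, kRegister, kStackSlot };

    struct Location {
      Kind kind;
      int index;
      bool IsInvalid() const { return kind == Kind::kInvalid; }
      bool IsUnallocated() const { return kind == Kind::kUnallocated; }
      bool IsStackSlot() const { return kind == Kind::kStackSlot; }
    };

    class Summary {
     public:
      void SetOut(Location location) {
        // Only an output that was never set may be initialized here.
        assert(output_.IsUnallocated() || output_.IsInvalid());
        output_ = location;
      }
      void UpdateOut(Location location) {
        // Only an already-allocated stack slot may be refined, e.g. once the
        // final frame size is known.
        assert(output_.IsStackSlot());
        output_ = location;
      }
      Location Out() const { return output_; }
     private:
      Location output_{Kind::kInvalid, 0};
    };

    int main() {
      Summary summary;
      summary.SetOut({Kind::kStackSlot, 8});
      summary.UpdateOut({Kind::kStackSlot, 8 + 64});  // fold in the frame size
      assert(summary.Out().index == 72);
      return 0;
    }
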
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 9253f0b740..2dab605465 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -525,6 +525,7 @@ class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
M(SuspendCheck, Instruction) \
M(Temporary, Instruction) \
M(Throw, Instruction) \
+ M(TypeCheck, Instruction) \
M(TypeConversion, Instruction) \
#define FOR_EACH_INSTRUCTION(M) \
@@ -2113,6 +2114,8 @@ class HTemporary : public HTemplateInstruction<0> {
size_t GetIndex() const { return index_; }
+ Primitive::Type GetType() const OVERRIDE { return GetPrevious()->GetType(); }
+
DECLARE_INSTRUCTION(Temporary);
private:
@@ -2348,6 +2351,45 @@ class HThrow : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HThrow);
};
+class HTypeCheck : public HExpression<2> {
+ public:
+ explicit HTypeCheck(HInstruction* object,
+ HLoadClass* constant,
+ bool class_is_final,
+ uint32_t dex_pc)
+ : HExpression(Primitive::kPrimBoolean, SideEffects::None()),
+ class_is_final_(class_is_final),
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, object);
+ SetRawInputAt(1, constant);
+ }
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
+ bool NeedsEnvironment() const OVERRIDE {
+ // TODO: Can we debug when doing a runtime instanceof check?
+ return false;
+ }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+
+ bool IsClassFinal() const { return class_is_final_; }
+
+ DECLARE_INSTRUCTION(TypeCheck);
+
+ private:
+ const bool class_is_final_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HTypeCheck);
+};
+
+
class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index bef4af42c2..4d6e66413d 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1165,11 +1165,11 @@ void RegisterAllocator::Resolve() {
if (location.IsStackSlot()) {
location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
current->SetSpillSlot(location.GetStackIndex());
- locations->SetOut(location);
+ locations->UpdateOut(location);
} else if (location.IsDoubleStackSlot()) {
location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
current->SetSpillSlot(location.GetStackIndex());
- locations->SetOut(location);
+ locations->UpdateOut(location);
} else if (current->HasSpillSlot()) {
current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
}
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 9b1a121fbe..3d81362851 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -622,7 +622,8 @@ TEST(RegisterAllocatorTest, ExpectedInRegisterHint) {
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
- ret->GetLocations()->SetInAt(0, Location::RegisterLocation(2));
+ // Don't use SetInAt because we are overriding an already allocated location.
+ ret->GetLocations()->inputs_.Put(0, Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -684,7 +685,8 @@ TEST(RegisterAllocatorTest, SameAsFirstInputHint) {
liveness.Analyze();
// check that both adds get the same register.
- first_add->InputAt(0)->GetLocations()->SetOut(Location::RegisterLocation(2));
+ // Don't use SetOutput because output is already allocated.
+ first_add->InputAt(0)->GetLocations()->output_ = Location::RegisterLocation(2);
ASSERT_EQ(first_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
ASSERT_EQ(second_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);