author     Elena Sayapina <elena.v.sayapina@intel.com>    2014-07-01 18:39:52 +0700
committer  Elena V Sayapina <elena.v.sayapina@intel.com>  2014-07-02 06:14:07 +0000
commit     dd64450b37776f68b9bfc47f8d9a88bc72c95727 (patch)
tree       b9d823dc406be3d68fe903b7644052fbf5d66e62 /compiler
parent     8b11544881ad6c8aeb50ba7c6a594363c2b684ec (diff)
x86_64: Unify 64-bit check in x86 compiler
Replace the x86-specific Gen64Bit() check with the CompilationUnit's
target64 field, which is set using the unified
Is64BitInstructionSet(InstructionSet) check.

Change-Id: Ic00ac863ed19e4543d7ea878d6c6c76d0bd85ce8
Signed-off-by: Elena Sayapina <elena.v.sayapina@intel.com>
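For context, a minimal sketch of the unified check this change switches to. The real Is64BitInstructionSet() lives in ART's instruction_set.h; the exact case list and the call site shown here are assumptions for illustration, not part of this diff:

    // Sketch only: a single 64-bit query keyed off the instruction set,
    // replacing the x86-local Gen64Bit() flag.
    static inline bool Is64BitInstructionSet(InstructionSet isa) {
      switch (isa) {
        case kArm64:
        case kX86_64:
          return true;   // 64-bit targets
        default:
          return false;  // 32-bit targets (kArm, kThumb2, kX86, kMips, ...)
      }
    }

    // Hypothetical call site during CompilationUnit setup:
    // cu->target64 = Is64BitInstructionSet(cu->instruction_set);

With target64 computed once per compilation unit, every backend can test cu_->target64 instead of carrying its own width flag, which is what lets the separate x86 and x86-64 code generators collapse into the single X86CodeGenerator entry point in the diff below.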
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/compilers.cc                   |  5
-rw-r--r--  compiler/dex/quick/mir_to_lir.h         |  2
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc  | 18
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc      | 12
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h    |  7
-rw-r--r--  compiler/dex/quick/x86/fp_x86.cc        | 10
-rw-r--r--  compiler/dex/quick/x86/int_x86.cc       | 46
-rw-r--r--  compiler/dex/quick/x86/target_x86.cc    | 43
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc   | 10
9 files changed, 70 insertions(+), 83 deletions(-)
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
index 76838d701b..f940b54f22 100644
--- a/compiler/compilers.cc
+++ b/compiler/compilers.cc
@@ -108,10 +108,9 @@ Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_
mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
case kX86:
- mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
+ // Fall-through.
case kX86_64:
- mir_to_lir = X86_64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
break;
default:
LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index cb0bb80fe6..171e871393 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -177,8 +177,6 @@ Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
ArenaAllocator* const arena);
-Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena);
// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 3f362f263e..a491873c2c 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -608,7 +608,7 @@ size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int
++size;
}
}
- if (Gen64Bit() || kIsDebugBuild) {
+ if (cu_->target64 || kIsDebugBuild) {
bool registers_need_rex_prefix = NeedsRex(raw_reg) || NeedsRex(raw_index) || NeedsRex(raw_base);
if (r8_form) {
// Do we need an empty REX prefix to normalize byte registers?
@@ -617,7 +617,7 @@ size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int
(modrm_is_reg_reg && (RegStorage::RegNum(raw_base) >= 4));
}
if (registers_need_rex_prefix) {
- DCHECK(Gen64Bit()) << "Attempt to use a 64-bit only addressable register "
+ DCHECK(cu_->target64) << "Attempt to use a 64-bit only addressable register "
<< RegStorage::RegNum(raw_reg) << " with instruction " << entry->name;
if (entry->skeleton.prefix1 != REX_W && entry->skeleton.prefix2 != REX_W) {
++size; // rex
@@ -636,7 +636,7 @@ size_t X86Mir2Lir::ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int
}
if (!modrm_is_reg_reg) {
if (has_sib || LowRegisterBits(raw_base) == rs_rX86_SP.GetRegNum()
- || (Gen64Bit() && entry->skeleton.prefix1 == THREAD_PREFIX)) {
+ || (cu_->target64 && entry->skeleton.prefix1 == THREAD_PREFIX)) {
// SP requires a SIB byte.
// GS access also needs a SIB byte for absolute adressing in 64-bit mode.
++size;
@@ -812,7 +812,7 @@ size_t X86Mir2Lir::GetInsnSize(LIR* lir) {
case kMacro: // lir operands - 0: reg
DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
- ComputeSize(&X86Mir2Lir::EncodingMap[Gen64Bit() ? kX86Sub64RI : kX86Sub32RI],
+ ComputeSize(&X86Mir2Lir::EncodingMap[cu_->target64 ? kX86Sub64RI : kX86Sub32RI],
lir->operands[0], NO_REG, NO_REG, 0) -
// Shorter ax encoding.
(RegStorage::RegNum(lir->operands[0]) == rs_rAX.GetRegNum() ? 1 : 0);
@@ -849,7 +849,7 @@ void X86Mir2Lir::CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw
}
if (RegStorage::RegNum(raw_reg) >= 4) {
// ah, bh, ch and dh are not valid registers in 32-bit.
- CHECK(Gen64Bit() || !entry->skeleton.r8_form)
+ CHECK(cu_->target64 || !entry->skeleton.r8_form)
<< "Invalid register " << static_cast<int>(RegStorage::RegNum(raw_reg))
<< " for instruction " << entry->name << " in "
<< PrettyMethod(cu_->method_idx, *cu_->dex_file);
@@ -893,7 +893,7 @@ void X86Mir2Lir::EmitPrefix(const X86EncodingMap* entry,
rex |= 0x41; // REX.000B
}
if (entry->skeleton.prefix1 != 0) {
- if (Gen64Bit() && entry->skeleton.prefix1 == THREAD_PREFIX) {
+ if (cu_->target64 && entry->skeleton.prefix1 == THREAD_PREFIX) {
// 64 bit addresses by GS, not FS.
code_buffer_.push_back(THREAD_PREFIX_GS);
} else {
@@ -918,7 +918,7 @@ void X86Mir2Lir::EmitPrefix(const X86EncodingMap* entry,
DCHECK_EQ(0, entry->skeleton.prefix2);
}
if (rex != 0) {
- DCHECK(Gen64Bit());
+ DCHECK(cu_->target64);
code_buffer_.push_back(rex);
}
}
@@ -959,7 +959,7 @@ void X86Mir2Lir::EmitDisp(uint8_t base, int32_t disp) {
}
void X86Mir2Lir::EmitModrmThread(uint8_t reg_or_opcode) {
- if (Gen64Bit()) {
+ if (cu_->target64) {
// Absolute adressing for GS access.
uint8_t modrm = (0 << 6) | (reg_or_opcode << 3) | rs_rX86_SP.GetRegNum();
code_buffer_.push_back(modrm);
@@ -1553,7 +1553,7 @@ void X86Mir2Lir::EmitMacro(const X86EncodingMap* entry, int32_t raw_reg, int32_t
uint8_t low_reg = LowRegisterBits(raw_reg);
code_buffer_.push_back(0x58 + low_reg); // pop reg
- EmitRegImm(&X86Mir2Lir::EncodingMap[Gen64Bit() ? kX86Sub64RI : kX86Sub32RI],
+ EmitRegImm(&X86Mir2Lir::EncodingMap[cu_->target64 ? kX86Sub64RI : kX86Sub32RI],
raw_reg, offset + 5 /* size of call +0 */);
}
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 425caec177..bfbfa0e49a 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -94,7 +94,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
start_of_method_reg = rl_method.reg;
store_method_addr_used_ = true;
} else {
- if (Gen64Bit()) {
+ if (cu_->target64) {
start_of_method_reg = AllocTempWide();
} else {
start_of_method_reg = AllocTemp();
@@ -119,7 +119,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
2, WrapPointer(tab_rec));
// Add displacement to start of method
- if (Gen64Bit()) {
+ if (cu_->target64) {
NewLIR2(kX86Add64RR, start_of_method_reg.GetReg(), disp_reg.GetReg());
} else {
OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
@@ -174,7 +174,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
NewLIR1(kX86StartOfMethod, rs_rX86_ARG2.GetReg());
}
NewLIR2(kX86PcRelAdr, rs_rX86_ARG1.GetReg(), WrapPointer(tab_rec));
- NewLIR2(Gen64Bit() ? kX86Add64RR : kX86Add32RR, rs_rX86_ARG1.GetReg(), rs_rX86_ARG2.GetReg());
+ NewLIR2(cu_->target64 ? kX86Add64RR : kX86Add32RR, rs_rX86_ARG1.GetReg(), rs_rX86_ARG2.GetReg());
if (cu_->target64) {
CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), rs_rX86_ARG0,
rs_rX86_ARG1, true);
@@ -204,7 +204,7 @@ void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
int ct_offset = cu_->target64 ?
Thread::CardTableOffset<8>().Int32Value() :
Thread::CardTableOffset<4>().Int32Value();
- if (Gen64Bit()) {
+ if (cu_->target64) {
NewLIR2(kX86Mov64RT, reg_card_base.GetReg(), ct_offset);
} else {
NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
@@ -236,7 +236,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
* a leaf *and* our frame size < fudge factor.
*/
const bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
- !IsLargeFrame(frame_size_, Gen64Bit() ? kX86_64 : kX86);
+ !IsLargeFrame(frame_size_, cu_->target64 ? kX86_64 : kX86);
NewLIR0(kPseudoMethodEntry);
/* Spill core callee saves */
SpillCoreRegs();
@@ -296,7 +296,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0,
- Gen64Bit() ? k64 : k32, kNotVolatile);
+ cu_->target64 ? k64 : k32, kNotVolatile);
}
FreeTemp(rs_rX86_ARG0);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 70382c746a..21d7419d66 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -59,7 +59,7 @@ class X86Mir2Lir : public Mir2Lir {
};
public:
- X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit);
+ X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
@@ -802,8 +802,6 @@ class X86Mir2Lir : public Mir2Lir {
*/
void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
- bool Gen64Bit() const { return gen64bit_; }
-
// Information derived from analysis of MIR
// The compiler temporary for the code address of the method.
@@ -833,9 +831,6 @@ class X86Mir2Lir : public Mir2Lir {
// Epilogue increment of stack pointer.
LIR* stack_increment_;
- // 64-bit mode
- bool gen64bit_;
-
// The list of const vector literals.
LIR *const_vectors_;
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f854adb175..1aeacedb77 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -254,7 +254,7 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
return;
}
case Instruction::LONG_TO_DOUBLE:
- if (Gen64Bit()) {
+ if (cu_->target64) {
rcSrc = kCoreReg;
op = kX86Cvtsqi2sdRR;
break;
@@ -262,7 +262,7 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
GenLongToFP(rl_dest, rl_src, true /* is_double */);
return;
case Instruction::LONG_TO_FLOAT:
- if (Gen64Bit()) {
+ if (cu_->target64) {
rcSrc = kCoreReg;
op = kX86Cvtsqi2ssRR;
break;
@@ -270,7 +270,7 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
GenLongToFP(rl_dest, rl_src, false /* is_double */);
return;
case Instruction::FLOAT_TO_LONG:
- if (Gen64Bit()) {
+ if (cu_->target64) {
rl_src = LoadValue(rl_src, kFPReg);
// If result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
@@ -295,7 +295,7 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
}
return;
case Instruction::DOUBLE_TO_LONG:
- if (Gen64Bit()) {
+ if (cu_->target64) {
rl_src = LoadValueWide(rl_src, kFPReg);
// If result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
@@ -569,7 +569,7 @@ void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- if (Gen64Bit()) {
+ if (cu_->target64) {
OpRegCopy(rl_result.reg, rl_src.reg);
// Flip sign bit.
NewLIR2(kX86Rol64RI, rl_result.reg.GetReg(), 1);
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 481b00c08e..bd007e7e61 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -31,7 +31,7 @@ namespace art {
*/
void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2) {
- if (Gen64Bit()) {
+ if (cu_->target64) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -322,7 +322,7 @@ void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
return;
}
- if (Gen64Bit()) {
+ if (cu_->target64) {
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
@@ -376,7 +376,7 @@ void X86Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
bool is_equality_test = ccode == kCondEq || ccode == kCondNe;
- if (Gen64Bit()) {
+ if (cu_->target64) {
if (is_equality_test && val == 0) {
// We can simplify of comparing for ==, != to 0.
NewLIR2(kX86Test64RR, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
@@ -969,7 +969,7 @@ void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
}
void X86Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
- if (Gen64Bit()) {
+ if (cu_->target64) {
DCHECK(reg.Is64Bit());
NewLIR2(kX86Cmp64RI8, reg.GetReg(), 0);
@@ -1154,7 +1154,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- if (Gen64Bit()) {
+ if (cu_->target64) {
if (rl_src1.is_const) {
std::swap(rl_src1, rl_src2);
}
@@ -1383,7 +1383,7 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
if (rl_src.location == kLocPhysReg) {
// Both operands are in registers.
// But we must ensure that rl_src is in pair
- if (Gen64Bit()) {
+ if (cu_->target64) {
NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
} else {
rl_src = LoadValueWide(rl_src, kCoreReg);
@@ -1409,10 +1409,10 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(x86op, Gen64Bit() ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(), r_base, displacement + LOWORD_OFFSET);
+ LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(), r_base, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
x86op = GetOpcode(op, rl_dest, rl_src, true);
lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), r_base, displacement + HIWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
@@ -1444,12 +1444,12 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET,
- Gen64Bit() ? rl_src.reg.GetReg() : rl_src.reg.GetLowReg());
+ cu_->target64 ? rl_src.reg.GetReg() : rl_src.reg.GetLowReg());
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
x86op = GetOpcode(op, rl_dest, rl_src, true);
lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
@@ -1503,7 +1503,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- if (Gen64Bit()) {
+ if (cu_->target64) {
if (IsTemp(rl_src1.reg)) {
GenLongRegOrMemOp(rl_src1, rl_src2, op);
} else if (is_commutative) {
@@ -1572,7 +1572,7 @@ void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
}
void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
- if (Gen64Bit()) {
+ if (cu_->target64) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result;
rl_result = EvalLocWide(rl_dest, kCoreReg, true);
@@ -1586,7 +1586,7 @@ void X86Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div) {
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
LOG(FATAL) << "Unexpected use GenDivRemLong()";
return;
}
@@ -1641,7 +1641,7 @@ void X86Mir2Lir::GenDivRemLong(Instruction::Code, RegLocation rl_dest, RegLocati
void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result;
- if (Gen64Bit()) {
+ if (cu_->target64) {
rl_result = EvalLocWide(rl_dest, kCoreReg, true);
OpRegReg(kOpNeg, rl_result.reg, rl_src.reg);
} else {
@@ -1676,7 +1676,7 @@ void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> th
void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset) {
DCHECK_EQ(kX86_64, cu_->instruction_set);
X86OpCode opcode = kX86Bkpt;
- if (Gen64Bit() && r_dest.Is64BitSolo()) {
+ if (cu_->target64 && r_dest.Is64BitSolo()) {
switch (op) {
case kOpCmp: opcode = kX86Cmp64RT; break;
case kOpMov: opcode = kX86Mov64RT; break;
@@ -1808,7 +1808,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src, int shift_amount) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- if (Gen64Bit()) {
+ if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
switch (opcode) {
case Instruction::SHL_LONG:
@@ -1982,7 +1982,7 @@ X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation dest, RegLocat
bool is_high_op) {
bool rhs_in_mem = rhs.location != kLocPhysReg;
bool dest_in_mem = dest.location != kLocPhysReg;
- bool is64Bit = Gen64Bit();
+ bool is64Bit = cu_->target64;
DCHECK(!rhs_in_mem || !dest_in_mem);
switch (op) {
case Instruction::ADD_LONG:
@@ -2037,7 +2037,7 @@ X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation dest, RegLocat
X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_high_op,
int32_t value) {
bool in_mem = loc.location != kLocPhysReg;
- bool is64Bit = Gen64Bit();
+ bool is64Bit = cu_->target64;
bool byte_imm = IS_SIMM8(value);
DCHECK(in_mem || !loc.reg.IsFloat());
switch (op) {
@@ -2111,7 +2111,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
DCHECK(rl_src.is_const);
int64_t val = mir_graph_->ConstantValueWide(rl_src);
- if (Gen64Bit()) {
+ if (cu_->target64) {
// We can do with imm only if it fits 32 bit
if (val != (static_cast<int64_t>(static_cast<int32_t>(val)))) {
return false;
@@ -2196,7 +2196,7 @@ bool X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
DCHECK(rl_src2.is_const);
int64_t val = mir_graph_->ConstantValueWide(rl_src2);
- if (Gen64Bit()) {
+ if (cu_->target64) {
// We can do with imm only if it fits 32 bit
if (val != (static_cast<int64_t>(static_cast<int32_t>(val)))) {
return false;
@@ -2384,7 +2384,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
RegLocation rl_result = GetReturn(kRefReg);
// On x86-64 kArg0 is not EAX, so we have to copy ref from kArg0 to EAX.
- if (Gen64Bit()) {
+ if (cu_->target64) {
OpRegCopy(rl_result.reg, TargetReg(kArg0));
}
@@ -2685,7 +2685,7 @@ bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_
}
void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
Mir2Lir::GenIntToLong(rl_dest, rl_src);
return;
}
@@ -2706,7 +2706,7 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift) {
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
return;
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 408a40a3fb..b15591b413 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -158,7 +158,7 @@ RegLocation X86Mir2Lir::LocCReturnRef() {
}
RegLocation X86Mir2Lir::LocCReturnWide() {
- return Gen64Bit() ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
+ return cu_->target64 ? x86_64_loc_c_return_wide : x86_loc_c_return_wide;
}
RegLocation X86Mir2Lir::LocCReturnFloat() {
@@ -196,7 +196,7 @@ RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
case kRet1: res_reg = rs_rX86_RET1; break;
case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
case kHiddenArg: res_reg = rs_rAX; break;
- case kHiddenFpArg: DCHECK(!Gen64Bit()); res_reg = rs_fr0; break;
+ case kHiddenFpArg: DCHECK(!cu_->target64); res_reg = rs_fr0; break;
case kCount: res_reg = rs_rX86_COUNT; break;
default: res_reg = RegStorage::InvalidReg();
}
@@ -425,14 +425,14 @@ void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
RegStorage X86Mir2Lir::AllocateByteRegister() {
RegStorage reg = AllocTypedTemp(false, kCoreReg);
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
DCHECK_LT(reg.GetRegNum(), rs_rX86_SP.GetRegNum());
}
return reg;
}
bool X86Mir2Lir::IsByteRegister(RegStorage reg) {
- return Gen64Bit() || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
+ return cu_->target64 || reg.GetRegNum() < rs_rX86_SP.GetRegNum();
}
/* Clobber all regs that might be used by an external C call */
@@ -451,7 +451,7 @@ void X86Mir2Lir::ClobberCallerSave() {
Clobber(rs_fr6);
Clobber(rs_fr7);
- if (Gen64Bit()) {
+ if (cu_->target64) {
Clobber(rs_r8);
Clobber(rs_r9);
Clobber(rs_r10);
@@ -494,7 +494,7 @@ void X86Mir2Lir::LockCallTemps() {
LockTemp(rs_rX86_ARG1);
LockTemp(rs_rX86_ARG2);
LockTemp(rs_rX86_ARG3);
- if (Gen64Bit()) {
+ if (cu_->target64) {
LockTemp(rs_rX86_ARG4);
LockTemp(rs_rX86_ARG5);
LockTemp(rs_rX86_FARG0);
@@ -514,7 +514,7 @@ void X86Mir2Lir::FreeCallTemps() {
FreeTemp(rs_rX86_ARG1);
FreeTemp(rs_rX86_ARG2);
FreeTemp(rs_rX86_ARG3);
- if (Gen64Bit()) {
+ if (cu_->target64) {
FreeTemp(rs_rX86_ARG4);
FreeTemp(rs_rX86_ARG5);
FreeTemp(rs_rX86_FARG0);
@@ -586,7 +586,7 @@ bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
}
void X86Mir2Lir::CompilerInitializeRegAlloc() {
- if (Gen64Bit()) {
+ if (cu_->target64) {
reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, core_regs_64q, sp_regs_64,
dp_regs_64, reserved_regs_64, reserved_regs_64q,
core_temps_64, core_temps_64q, sp_temps_64, dp_temps_64);
@@ -599,7 +599,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
// Target-specific adjustments.
// Add in XMM registers.
- const ArrayRef<const RegStorage> *xp_temps = Gen64Bit() ? &xp_temps_64 : &xp_temps_32;
+ const ArrayRef<const RegStorage> *xp_temps = cu_->target64 ? &xp_temps_64 : &xp_temps_32;
for (RegStorage reg : *xp_temps) {
RegisterInfo* info = new (arena_) RegisterInfo(reg, GetRegMaskCommon(reg));
reginfo_map_.Put(reg.GetReg(), info);
@@ -627,7 +627,7 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
DCHECK_EQ(info->StorageMask(), 0x1U);
}
- if (Gen64Bit()) {
+ if (cu_->target64) {
// Alias 32bit W registers to corresponding 64bit X registers.
GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
@@ -690,7 +690,7 @@ bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
// X86_64 can handle any size.
- if (Gen64Bit()) {
+ if (cu_->target64) {
if (size == kReference) {
return kRefReg;
}
@@ -707,13 +707,13 @@ RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatil
return RegClassBySize(size);
}
-X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit)
+X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena),
base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
method_address_insns_(arena, 100, kGrowableArrayMisc),
class_type_address_insns_(arena, 100, kGrowableArrayMisc),
call_method_insns_(arena, 100, kGrowableArrayMisc),
- stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit),
+ stack_decrement_(nullptr), stack_increment_(nullptr),
const_vectors_(nullptr) {
store_method_addr_used_ = false;
if (kIsDebugBuild) {
@@ -725,7 +725,7 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
}
}
}
- if (Gen64Bit()) {
+ if (cu_->target64) {
rs_rX86_SP = rs_rX86_SP_64;
rs_rX86_ARG0 = rs_rDI;
@@ -798,12 +798,7 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
ArenaAllocator* const arena) {
- return new X86Mir2Lir(cu, mir_graph, arena, false);
-}
-
-Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
- ArenaAllocator* const arena) {
- return new X86Mir2Lir(cu, mir_graph, arena, true);
+ return new X86Mir2Lir(cu, mir_graph, arena);
}
// Not used in x86
@@ -1811,7 +1806,7 @@ void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int co
}
RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
return GetCoreArgMappingToPhysicalReg(arg_num);
}
@@ -1851,7 +1846,7 @@ RegStorage X86Mir2Lir::GetCoreArgMappingToPhysicalReg(int core_arg_num) {
* with one location record per word of argument.
*/
void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
- if (!Gen64Bit()) return Mir2Lir::FlushIns(ArgLocs, rl_method);
+ if (!cu_->target64) return Mir2Lir::FlushIns(ArgLocs, rl_method);
/*
* Dummy up a RegLocation for the incoming Method*
* It will attempt to keep kArg0 live (or copy it to home location
@@ -1951,7 +1946,7 @@ int X86Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
const MethodReference& target_method,
uint32_t vtable_idx, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type, bool skip_this) {
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
return Mir2Lir::GenDalvikArgsNoRange(info,
call_state, pcrLabel, next_call_insn,
target_method,
@@ -1985,7 +1980,7 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
const MethodReference& target_method,
uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
InvokeType type, bool skip_this) {
- if (!Gen64Bit()) {
+ if (!cu_->target64) {
return Mir2Lir::GenDalvikArgsRange(info, call_state,
pcrLabel, next_call_insn,
target_method,
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 0352808a7c..392eecfd25 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -492,10 +492,10 @@ LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
}
LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
- if (op == kOpMul && !Gen64Bit()) {
+ if (op == kOpMul && !cu_->target64) {
X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
- } else if (op == kOpAnd && !Gen64Bit()) {
+ } else if (op == kOpAnd && !cu_->target64) {
if (value == 0xFF && r_src.Low4()) {
return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
} else if (value == 0xFFFF) {
@@ -647,7 +647,7 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
DCHECK_EQ((displacement & 0x3), 0);
break;
case kWord:
- if (Gen64Bit()) {
+ if (cu_->target64) {
opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
CHECK_EQ(is_array, false);
CHECK_EQ(r_dest.IsFloat(), false);
@@ -796,7 +796,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
DCHECK_EQ((displacement & 0x3), 0);
break;
case kWord:
- if (Gen64Bit()) {
+ if (cu_->target64) {
opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
CHECK_EQ(is_array, false);
CHECK_EQ(r_src.IsFloat(), false);
@@ -906,7 +906,7 @@ void X86Mir2Lir::AnalyzeMIR() {
// Did we need a pointer to the method code?
if (store_method_addr_) {
- base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, Gen64Bit() == true);
+ base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, cu_->target64 == true);
} else {
base_of_code_ = nullptr;
}