Diffstat (limited to 'compiler/dex/quick')
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc  |   7
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc      |   2
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h    |  57
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc       |  23
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc    | 317
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc   |  21
-rw-r--r--  compiler/dex/quick/gen_common.cc            |  74
-rw-r--r--  compiler/dex/quick/gen_invoke.cc            |  41
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc            |  10
-rw-r--r--  compiler/dex/quick/ralloc_util.cc           |   4
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc          |  10
-rw-r--r--  compiler/dex/quick/x86/fp_x86.cc            |   4
-rw-r--r--  compiler/dex/quick/x86/int_x86.cc           |  12
13 files changed, 451 insertions, 131 deletions
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 93621471f7..c5bd005abf 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -688,9 +688,10 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
// Fail, if `expected' contains an unsatisfied requirement.
if (expected != nullptr) {
- // TODO(Arm64): make this FATAL.
- LOG(WARNING) << "Bad argument n. " << i << " of " << encoder->name
- << ". Expected " << expected << ", got 0x" << std::hex << operand;
+ LOG(WARNING) << "Method: " << PrettyMethod(cu_->method_idx, *cu_->dex_file)
+ << " @ 0x" << std::hex << lir->dalvik_offset;
+ LOG(FATAL) << "Bad argument n. " << i << " of " << encoder->name
+ << ". Expected " << expected << ", got 0x" << std::hex << operand;
}
}
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 59eec3d486..f1748effb2 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -142,7 +142,7 @@ void Arm64Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
// Load the displacement from the switch table
RegStorage disp_reg = AllocTemp();
// TODO(Arm64): generate "ldr w3, [x1,w2,sxtw #2]" rather than "ldr w3, [x1,x2,lsl #2]"?
- LoadBaseIndexed(table_base, key_reg, As64BitReg(disp_reg), 2, k32);
+ LoadBaseIndexed(table_base, As64BitReg(key_reg), As64BitReg(disp_reg), 2, k32);
// Get base branch address.
RegStorage branch_reg = AllocTempWide();
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 9a80c69918..a79c4fabca 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -20,9 +20,45 @@
#include "arm64_lir.h"
#include "dex/compiler_internals.h"
+#include <map>
+
namespace art {
class Arm64Mir2Lir : public Mir2Lir {
+ protected:
+ // TODO: consolidate 64-bit target support.
+ class InToRegStorageMapper {
+ public:
+ virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide) = 0;
+ virtual ~InToRegStorageMapper() {}
+ };
+
+ class InToRegStorageArm64Mapper : public InToRegStorageMapper {
+ public:
+ InToRegStorageArm64Mapper() : cur_core_reg_(0), cur_fp_reg_(0) {}
+ virtual ~InToRegStorageArm64Mapper() {}
+ virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide);
+ private:
+ int cur_core_reg_;
+ int cur_fp_reg_;
+ };
+
+ class InToRegStorageMapping {
+ public:
+ InToRegStorageMapping() : max_mapped_in_(0), is_there_stack_mapped_(false),
+ initialized_(false) {}
+ void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
+ int GetMaxMappedIn() { return max_mapped_in_; }
+ bool IsThereStackMapped() { return is_there_stack_mapped_; }
+ RegStorage Get(int in_position);
+ bool IsInitialized() { return initialized_; }
+ private:
+ std::map<int, RegStorage> mapping_;
+ int max_mapped_in_;
+ bool is_there_stack_mapped_;
+ bool initialized_;
+ };
+
public:
Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
@@ -219,12 +255,21 @@ class Arm64Mir2Lir : public Mir2Lir {
bool InexpensiveConstantDouble(int64_t value);
void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
- int LoadArgRegs(CallInfo* info, int call_state,
- NextCallInsn next_call_insn,
- const MethodReference& target_method,
- uint32_t vtable_idx,
- uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
- bool skip_this);
+
+ int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this);
+
+ int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this);
+ InToRegStorageMapping in_to_reg_storage_mapping_;
private:
/**
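
The mapper/mapping pair declared above is the core of the hard-float argument work: the mapper hands out the next free core or floating-point argument register for an `in`, and the mapping records, per `in` position, which physical register (if any) that argument landed in. A minimal standalone sketch of the assignment order, using plain stand-in types rather than the real RegStorage/RegLocation machinery (the example signature is hypothetical, not taken from the patch):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Stand-in for InToRegStorageArm64Mapper::GetNextReg (illustrative only):
    // core args use x1..x7 (x0 carries the method pointer), FP args use v0..v7,
    // and the register view (w/x, s/d) follows the is_wide flag.
    struct Arm64ArgMapperSketch {
      int next_core = 0;
      int next_fp = 0;
      std::string GetNextReg(bool is_fp, bool is_wide) {
        if (is_fp) {
          if (next_fp >= 8) return "<stack>";
          return (is_wide ? "d" : "s") + std::to_string(next_fp++);
        }
        if (next_core >= 7) return "<stack>";
        return (is_wide ? "x" : "w") + std::to_string(1 + next_core++);
      }
    };

    int main() {
      // Hypothetical Java signature: (int, double, long, float, int).
      struct Arg { const char* type; bool fp; bool wide; };
      std::vector<Arg> args = {{"int", false, false}, {"double", true, true},
                               {"long", false, true}, {"float", true, false},
                               {"int", false, false}};
      Arm64ArgMapperSketch mapper;
      for (const Arg& a : args) {
        std::printf("%-6s -> %s\n", a.type, mapper.GetNextReg(a.fp, a.wide).c_str());
      }
      return 0;
    }

For that signature the sketch prints w1, d0, x2, s1 and w3, which is the assignment InToRegStorageMapping::Initialize (implemented in target_arm64.cc below) would record; anything that falls past the available registers is left unmapped and flags is_there_stack_mapped_.
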
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 8112c2ec6d..2c6b11d015 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -551,8 +551,11 @@ LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
- // Combine sub & test using sub setflags encoding here
- OpRegRegImm(kOpSub, reg, reg, 1); // For value == 1, this should set flags.
+ // Combine sub & test using sub setflags encoding here. We need to make sure a
+ // subtract form that sets carry is used, so generate explicitly.
+ // TODO: might be best to add a new op, kOpSubs, and handle it generically.
+ ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
+ NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1); // For value == 1, this should set flags.
DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
return OpCondBranch(c_code, target);
}
@@ -676,9 +679,6 @@ void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, Reg
*/
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_dest, int scale) {
- // TODO(Arm64): check this.
- UNIMPLEMENTED(WARNING);
-
RegisterClass reg_class = RegClassBySize(size);
int len_offset = mirror::Array::LengthOffset().Int32Value();
int data_offset;
@@ -720,7 +720,8 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
} else {
// No special indexed operation, lea + load w/ displacement
reg_ptr = AllocTempRef();
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
+ EncodeShift(kA64Lsl, scale));
FreeTemp(rl_index.reg);
}
rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -754,7 +755,7 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
+ LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
MarkPossibleNullPointerException(opt_flags);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -767,9 +768,6 @@ void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
*/
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
- // TODO(Arm64): check this.
- UNIMPLEMENTED(WARNING);
-
RegisterClass reg_class = RegClassBySize(size);
int len_offset = mirror::Array::LengthOffset().Int32Value();
bool constant_index = rl_index.is_const;
@@ -825,7 +823,8 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
rl_src = LoadValue(rl_src, reg_class);
}
if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, rl_index.reg, EncodeShift(kA64Lsl, scale));
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
+ EncodeShift(kA64Lsl, scale));
}
if (needs_range_check) {
if (constant_index) {
@@ -846,7 +845,7 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
GenArrayBoundsCheck(rl_index.reg, reg_len);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
+ StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
MarkPossibleNullPointerException(opt_flags);
}
if (allocated_reg_ptr_temp) {
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index e2846aebc3..fba368aa8c 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -105,7 +105,6 @@ RegLocation Arm64Mir2Lir::LocCReturnDouble() {
// Return a target-dependent special register.
RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
- // TODO(Arm64): this function doesn't work for hard-float ABI.
RegStorage res_reg = RegStorage::InvalidReg();
switch (reg) {
case kSelf: res_reg = rs_rA64_SELF; break;
@@ -117,12 +116,20 @@ RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
case kArg1: res_reg = rs_x1; break;
case kArg2: res_reg = rs_x2; break;
case kArg3: res_reg = rs_x3; break;
+ case kArg4: res_reg = rs_x4; break;
+ case kArg5: res_reg = rs_x5; break;
+ case kArg6: res_reg = rs_x6; break;
+ case kArg7: res_reg = rs_x7; break;
case kFArg0: res_reg = rs_f0; break;
case kFArg1: res_reg = rs_f1; break;
case kFArg2: res_reg = rs_f2; break;
case kFArg3: res_reg = rs_f3; break;
+ case kFArg4: res_reg = rs_f4; break;
+ case kFArg5: res_reg = rs_f5; break;
+ case kFArg6: res_reg = rs_f6; break;
+ case kFArg7: res_reg = rs_f7; break;
case kRet0: res_reg = rs_x0; break;
- case kRet1: res_reg = rs_x0; break;
+ case kRet1: res_reg = rs_x1; break;
case kInvokeTgt: res_reg = rs_rA64_LR; break;
case kHiddenArg: res_reg = rs_x12; break;
case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
@@ -132,10 +139,6 @@ RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
return res_reg;
}
-RegStorage Arm64Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
- return RegStorage::InvalidReg();
-}
-
/*
* Decode the register id. This routine makes assumptions on the encoding made by RegStorage.
*/
@@ -738,18 +741,44 @@ RegLocation Arm64Mir2Lir::GetReturnAlt() {
/* To be used when explicitly managing register use */
void Arm64Mir2Lir::LockCallTemps() {
+ // TODO: needs cleanup.
LockTemp(rs_x0);
LockTemp(rs_x1);
LockTemp(rs_x2);
LockTemp(rs_x3);
+ LockTemp(rs_x4);
+ LockTemp(rs_x5);
+ LockTemp(rs_x6);
+ LockTemp(rs_x7);
+ LockTemp(rs_f0);
+ LockTemp(rs_f1);
+ LockTemp(rs_f2);
+ LockTemp(rs_f3);
+ LockTemp(rs_f4);
+ LockTemp(rs_f5);
+ LockTemp(rs_f6);
+ LockTemp(rs_f7);
}
/* To be used when explicitly managing register use */
void Arm64Mir2Lir::FreeCallTemps() {
+ // TODO: needs cleanup.
FreeTemp(rs_x0);
FreeTemp(rs_x1);
FreeTemp(rs_x2);
FreeTemp(rs_x3);
+ FreeTemp(rs_x4);
+ FreeTemp(rs_x5);
+ FreeTemp(rs_x6);
+ FreeTemp(rs_x7);
+ FreeTemp(rs_f0);
+ FreeTemp(rs_f1);
+ FreeTemp(rs_f2);
+ FreeTemp(rs_f3);
+ FreeTemp(rs_f4);
+ FreeTemp(rs_f5);
+ FreeTemp(rs_f6);
+ FreeTemp(rs_f7);
}
RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
@@ -786,6 +815,69 @@ const char* Arm64Mir2Lir::GetTargetInstFmt(int opcode) {
return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].fmt;
}
+RegStorage Arm64Mir2Lir::InToRegStorageArm64Mapper::GetNextReg(bool is_double_or_float,
+ bool is_wide) {
+ const RegStorage coreArgMappingToPhysicalReg[] =
+ {rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7};
+ const int coreArgMappingToPhysicalRegSize =
+ sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
+ const RegStorage fpArgMappingToPhysicalReg[] =
+ {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7};
+ const int fpArgMappingToPhysicalRegSize =
+ sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);
+
+ RegStorage result = RegStorage::InvalidReg();
+ if (is_double_or_float) {
+ if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
+ result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
+ if (result.Valid()) {
+ // TODO: switching between widths remains a bit ugly. Better way?
+ int res_reg = result.GetReg();
+ result = is_wide ? RegStorage::FloatSolo64(res_reg) : RegStorage::FloatSolo32(res_reg);
+ }
+ }
+ } else {
+ if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
+ result = coreArgMappingToPhysicalReg[cur_core_reg_++];
+ if (result.Valid()) {
+ // TODO: switching between widths remains a bit ugly. Better way?
+ int res_reg = result.GetReg();
+ result = is_wide ? RegStorage::Solo64(res_reg) : RegStorage::Solo32(res_reg);
+ }
+ }
+ }
+ return result;
+}
+
+RegStorage Arm64Mir2Lir::InToRegStorageMapping::Get(int in_position) {
+ DCHECK(IsInitialized());
+ auto res = mapping_.find(in_position);
+ return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
+}
+
+void Arm64Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
+ InToRegStorageMapper* mapper) {
+ DCHECK(mapper != nullptr);
+ max_mapped_in_ = -1;
+ is_there_stack_mapped_ = false;
+ for (int in_position = 0; in_position < count; in_position++) {
+ RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
+ if (reg.Valid()) {
+ mapping_[in_position] = reg;
+ max_mapped_in_ = std::max(max_mapped_in_, in_position);
+ if (reg.Is64BitSolo()) {
+ // We covered 2 args, so skip the next one
+ in_position++;
+ }
+ } else {
+ is_there_stack_mapped_ = true;
+ }
+ }
+ initialized_ = true;
+}
+
+
+// Deprecate. Use the new mechanism.
// TODO(Arm64): reuse info in QuickArgumentVisitor?
static RegStorage GetArgPhysicalReg(RegLocation* loc, int* num_gpr_used, int* num_fpr_used,
OpSize* op_size) {
@@ -805,7 +897,7 @@ static RegStorage GetArgPhysicalReg(RegLocation* loc, int* num_gpr_used, int* nu
}
} else {
int n = *num_gpr_used;
- if (n < 7) {
+ if (n < 8) {
*num_gpr_used = n + 1;
if (loc->wide) {
*op_size = k64;
@@ -820,6 +912,18 @@ static RegStorage GetArgPhysicalReg(RegLocation* loc, int* num_gpr_used, int* nu
return RegStorage::InvalidReg();
}
+RegStorage Arm64Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+ if (!in_to_reg_storage_mapping_.IsInitialized()) {
+ int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+ RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
+
+ InToRegStorageArm64Mapper mapper;
+ in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
+ }
+ return in_to_reg_storage_mapping_.Get(arg_num);
+}
+
+
/*
* If there are any ins passed in registers that have not been promoted
* to a callee-save register, flush them to the frame. Perform initial
@@ -888,33 +992,188 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
}
}
-int Arm64Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
- NextCallInsn next_call_insn,
- const MethodReference& target_method,
- uint32_t vtable_idx, uintptr_t direct_code,
- uintptr_t direct_method, InvokeType type, bool skip_this) {
- int last_arg_reg = TargetReg(kArg3).GetReg();
- int next_reg = TargetReg(kArg1).GetReg();
- int next_arg = 0;
- if (skip_this) {
- next_reg++;
- next_arg++;
+/*
+ * Load up to 5 arguments, the first three of which will be in
+ * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
+ * and as part of the load sequence, it must be replaced with
+ * the target method pointer.
+ */
+int Arm64Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
+ int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this) {
+ return GenDalvikArgsRange(info,
+ call_state, pcrLabel, next_call_insn,
+ target_method,
+ vtable_idx, direct_code,
+ direct_method, type, skip_this);
+}
+
+/*
+ * May have 0+ arguments (also used for jumbo). Note that
+ * source virtual registers may be in physical registers, so may
+ * need to be flushed to home location before copying. This
+ * applies to arg3 and above (see below).
+ *
+ * FIXME: update comments.
+ *
+ * Two general strategies:
+ * If < 20 arguments
+ * Pass args 3-18 using vldm/vstm block copy
+ * Pass arg0, arg1 & arg2 in kArg1-kArg3
+ * If 20+ arguments
+ * Pass args arg19+ using memcpy block copy
+ * Pass arg0, arg1 & arg2 in kArg1-kArg3
+ *
+ */
+int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
+ LIR** pcrLabel, NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this) {
+ /* If no arguments, just return */
+ if (info->num_arg_words == 0)
+ return call_state;
+
+ const int start_index = skip_this ? 1 : 0;
+
+ InToRegStorageArm64Mapper mapper;
+ InToRegStorageMapping in_to_reg_storage_mapping;
+ in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
+ const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
+ const int size_of_the_last_mapped = last_mapped_in == -1 ? 1 :
+ in_to_reg_storage_mapping.Get(last_mapped_in).Is64BitSolo() ? 2 : 1;
+ int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + size_of_the_last_mapped);
+
+  // First of all, check whether it makes sense to use bulk copying.
+  // The optimization is applicable only to the range case.
+ // TODO: make a constant instead of 2
+ if (info->is_range && regs_left_to_pass_via_stack >= 2) {
+ // Scan the rest of the args - if in phys_reg flush to memory
+ for (int next_arg = last_mapped_in + size_of_the_last_mapped; next_arg < info->num_arg_words;) {
+ RegLocation loc = info->args[next_arg];
+ if (loc.wide) {
+ loc = UpdateLocWide(loc);
+ if (loc.location == kLocPhysReg) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64);
+ }
+ next_arg += 2;
+ } else {
+ loc = UpdateLoc(loc);
+ if (loc.location == kLocPhysReg) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32);
+ }
+ next_arg++;
+ }
+ }
+
+ // Logic below assumes that Method pointer is at offset zero from SP.
+ DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
+
+ // The rest can be copied together
+ int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
+ int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
+ cu_->instruction_set);
+
+ int current_src_offset = start_offset;
+ int current_dest_offset = outs_offset;
+
+      // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ while (regs_left_to_pass_via_stack > 0) {
+ /*
+ * TODO: Improve by adding block copy for large number of arguments. This
+ * should be done, if possible, as a target-depending helper. For now, just
+ * copy a Dalvik vreg at a time.
+ */
+ // Moving 32-bits via general purpose register.
+ size_t bytes_to_move = sizeof(uint32_t);
+
+ // Instead of allocating a new temp, simply reuse one of the registers being used
+ // for argument passing.
+ RegStorage temp = TargetReg(kArg3);
+
+ // Now load the argument VR and store to the outs.
+ Load32Disp(TargetReg(kSp), current_src_offset, temp);
+ Store32Disp(TargetReg(kSp), current_dest_offset, temp);
+
+ current_src_offset += bytes_to_move;
+ current_dest_offset += bytes_to_move;
+ regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
+ }
+ DCHECK_EQ(regs_left_to_pass_via_stack, 0);
+ }
+
+  // Now handle the arguments that were not mapped to registers, if any.
+ if (in_to_reg_storage_mapping.IsThereStackMapped()) {
+ RegStorage regSingle = TargetReg(kArg2);
+ RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
+ for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
+ RegLocation rl_arg = info->args[i];
+ rl_arg = UpdateRawLoc(rl_arg);
+ RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ if (!reg.Valid()) {
+ int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
+
+ {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ if (rl_arg.wide) {
+ if (rl_arg.location == kLocPhysReg) {
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64);
+ } else {
+ LoadValueDirectWideFixed(rl_arg, regWide);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64);
+ }
+ i++;
+ } else {
+ if (rl_arg.location == kLocPhysReg) {
+ StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32);
+ } else {
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32);
+ }
+ }
+ }
+ call_state = next_call_insn(cu_, info, call_state, target_method,
+ vtable_idx, direct_code, direct_method, type);
+ }
+ }
}
- for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
- RegLocation rl_arg = info->args[next_arg++];
+
+ // Finish with mapped registers
+ for (int i = start_index; i <= last_mapped_in; i++) {
+ RegLocation rl_arg = info->args[i];
rl_arg = UpdateRawLoc(rl_arg);
- if (rl_arg.wide && (next_reg <= TargetReg(kArg2).GetReg())) {
- LoadValueDirectWideFixed(rl_arg, RegStorage::Solo64(next_reg));
- next_arg++;
- } else {
+ RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ if (reg.Valid()) {
if (rl_arg.wide) {
- rl_arg = NarrowRegLoc(rl_arg);
- rl_arg.is_const = false;
+ LoadValueDirectWideFixed(rl_arg, reg);
+ i++;
+ } else {
+ LoadValueDirectFixed(rl_arg, reg);
}
- LoadValueDirectFixed(rl_arg, RegStorage::Solo32(next_reg));
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
+ }
+ }
+
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
+ if (pcrLabel) {
+ if (Runtime::Current()->ExplicitNullChecks()) {
+ *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+ } else {
+ *pcrLabel = nullptr;
+ // In lieu of generating a check for kArg1 being null, we need to
+ // perform a load when doing implicit checks.
+ RegStorage tmp = AllocTemp();
+ Load32Disp(TargetReg(kArg1), 0, tmp);
+ MarkPossibleNullPointerException(info->opt_flags);
+ FreeTemp(tmp);
}
- call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
- direct_code, direct_method, type);
}
return call_state;
}
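
Taken as a whole, the new GenDalvikArgsRange marshals a call in two broad phases: argument words past the last register-mapped one are stored to the outs area first (with a bulk-copy fast path for range calls), and the register-mapped arguments are loaded into their registers last. A compressed, self-contained sketch of that ordering, with illustrative types and a hypothetical mapping rather than ART's real ones:

    #include <cstdio>
    #include <map>
    #include <vector>

    struct ArgWord { bool wide; };  // one Dalvik argument word; a wide arg spans two

    int main() {
      // Hypothetical call with 6 argument words; assume words 0, 1 and 3 were mapped.
      std::vector<ArgWord> args = {{false}, {true}, {false}, {false}, {false}, {false}};
      std::map<int, const char*> reg_map = {{0, "w1"}, {1, "x2"}, {3, "w3"}};
      const int last_mapped_in = 3;

      // Phase 1: everything beyond the last mapped word goes to the outs area.
      for (int i = last_mapped_in + 1; i < static_cast<int>(args.size()); ++i) {
        std::printf("store arg word %d to out slot %d\n", i, i);
      }
      // Phase 2: mapped arguments are loaded directly into their target registers;
      // a wide argument consumes the following word as well.
      for (int i = 0; i <= last_mapped_in; ++i) {
        auto it = reg_map.find(i);
        if (it == reg_map.end()) continue;  // not mapped: a wide arg's high half or a stack word
        std::printf("load arg word %d into %s\n", i, it->second);
        if (args[i].wide) ++i;
      }
      return 0;
    }

Loading the mapped registers last matches the real code's structure: the stack-bound stores reuse argument registers as temporaries (kArg2/kArg3 above), so the final register loads have to come after them.
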
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 71e9e95ab0..f38429398c 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -112,7 +112,7 @@ LIR* Arm64Mir2Lir::LoadFPConstantValue(int r_dest, int32_t value) {
LIR* Arm64Mir2Lir::LoadFPConstantValueWide(int r_dest, int64_t value) {
DCHECK(RegStorage::IsDouble(r_dest));
if (value == 0) {
- return NewLIR2(kA64Fmov2Sx, r_dest, rwzr);
+ return NewLIR2(kA64Fmov2Sx, r_dest, rxzr);
} else {
int32_t encoded_imm = EncodeImmDouble(value);
if (encoded_imm >= 0) {
@@ -778,6 +778,11 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
LIR* load;
int expected_scale = 0;
ArmOpcode opcode = kA64Brk1d;
+ DCHECK(r_base.Is64Bit());
+ // TODO: need a cleaner handling of index registers here and throughout.
+ if (r_index.Is32Bit()) {
+ r_index = As64BitReg(r_index);
+ }
if (r_dest.IsFloat()) {
if (r_dest.IsDouble()) {
@@ -846,6 +851,11 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
LIR* store;
int expected_scale = 0;
ArmOpcode opcode = kA64Brk1d;
+ DCHECK(r_base.Is64Bit());
+ // TODO: need a cleaner handling of index registers here and throughout.
+ if (r_index.Is32Bit()) {
+ r_index = As64BitReg(r_index);
+ }
if (r_src.IsFloat()) {
if (r_src.IsDouble()) {
@@ -968,8 +978,9 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
load = NewLIR3(alt_opcode, r_dest.GetReg(), r_base.GetReg(), displacement);
} else {
// Use long sequence.
- RegStorage r_scratch = AllocTemp();
- LoadConstant(r_scratch, displacement);
+ // TODO: cleaner support for index/displacement registers? Not a reference, but must match width.
+ RegStorage r_scratch = AllocTempWide();
+ LoadConstantWide(r_scratch, displacement);
load = LoadBaseIndexed(r_base, r_scratch, r_dest, 0, size);
FreeTemp(r_scratch);
}
@@ -1050,8 +1061,8 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
store = NewLIR3(alt_opcode, r_src.GetReg(), r_base.GetReg(), displacement);
} else {
// Use long sequence.
- RegStorage r_scratch = AllocTemp();
- LoadConstant(r_scratch, displacement);
+ RegStorage r_scratch = AllocTempWide();
+ LoadConstantWide(r_scratch, displacement);
store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
FreeTemp(r_scratch);
}
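
The utility_arm64.cc changes encode a single width rule: the base of an indexed access is always a 64-bit pointer register, so the index register, and any scratch register standing in for a large displacement, must be 64-bit as well; a 32-bit index is now widened defensively. A small sketch of that invariant, with a simplified register type in place of RegStorage:

    #include <cassert>

    struct RegSketch { bool is64; int num; };  // simplified RegStorage stand-in

    static RegSketch As64BitRegSketch(RegSketch r) { return {true, r.num}; }

    // Mirrors the checks added to LoadBaseIndexed/StoreBaseIndexed: 64-bit base,
    // index widened on demand, then the register-offset access can be emitted.
    static void BaseIndexedAccessSketch(RegSketch base, RegSketch index) {
      assert(base.is64);                        // DCHECK(r_base.Is64Bit())
      if (!index.is64) {
        index = As64BitRegSketch(index);        // widen a w-register index to x
      }
      // ... emit the load/store from [base + (index << scale)] here ...
      (void)index;
    }

    int main() {
      BaseIndexedAccessSketch({true, 1}, {false, 2});  // x1 base, w2 index -> widened
      return 0;
    }

The same rule explains the AllocTempWide/LoadConstantWide switch in LoadBaseDispBody/StoreBaseDispBody: the long-sequence path feeds the displacement through LoadBaseIndexed/StoreBaseIndexed, so the scratch register has to match the 64-bit base.
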
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index f9081cea08..3b99421a6a 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -73,7 +73,7 @@ void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
+ if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowDivZero), true);
} else {
m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowDivZero), true);
@@ -96,7 +96,7 @@ void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) {
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
+ if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
index_, length_, true);
} else {
@@ -129,7 +129,7 @@ void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
m2l_->OpRegCopy(m2l_->TargetReg(kArg1), length_);
m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
- if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
+ if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
} else {
@@ -158,7 +158,7 @@ LIR* Mir2Lir::GenNullCheck(RegStorage reg) {
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoThrowTarget);
- if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
+ if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pThrowNullPointer), true);
} else {
m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer), true);
@@ -385,7 +385,7 @@ static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
*/
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
RegLocation rl_src) {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenNewArrayImpl<8>(this, cu_, type_idx, rl_dest, rl_src);
} else {
GenNewArrayImpl<4>(this, cu_, type_idx, rl_dest, rl_src);
@@ -414,7 +414,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
int elems = info->num_arg_words;
int type_idx = info->index;
FlushAllRegs(); /* Everything to home location */
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenFilledNewArrayCall<8>(this, cu_, elems, type_idx);
} else {
GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
@@ -457,12 +457,13 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
* critical.
*/
// This is addressing the stack, which may be out of the 4G area.
- RegStorage r_src = cu_->target64 ? AllocTempWide() : AllocTemp();
- RegStorage r_dst = AllocTemp();
- RegStorage r_idx = AllocTemp();
+ RegStorage r_src = AllocTempRef();
+ RegStorage r_dst = AllocTempRef();
+ RegStorage r_idx = AllocTempRef(); // Not really a reference, but match src/dst.
RegStorage r_val;
switch (cu_->instruction_set) {
case kThumb2:
+ case kArm64:
r_val = TargetReg(kLr);
break;
case kX86:
@@ -531,7 +532,7 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
void Compile() {
LIR* unresolved_target = GenerateTargetLabel();
uninit_->target = unresolved_target;
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
m2l_->CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeStaticStorage),
storage_index_, true);
} else {
@@ -640,7 +641,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
FreeTemp(r_base);
} else {
FlushAllRegs(); // Everything to home locations
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenSputCall<8>(this, is_long_or_double, is_object, &field_info, rl_src);
} else {
GenSputCall<4>(this, is_long_or_double, is_object, &field_info, rl_src);
@@ -734,7 +735,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
}
} else {
FlushAllRegs(); // Everything to home locations
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenSgetCall<8>(this, is_long_or_double, is_object, &field_info);
} else {
GenSgetCall<4>(this, is_long_or_double, is_object, &field_info);
@@ -801,7 +802,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
StoreValue(rl_dest, rl_result);
}
} else {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenIgetCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj);
} else {
GenIgetCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj);
@@ -861,7 +862,7 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
MarkGCCard(rl_src.reg, rl_obj.reg);
}
} else {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenIputCall<8>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
} else {
GenIputCall<4>(this, is_long_or_double, is_object, &field_info, rl_obj, rl_src);
@@ -885,7 +886,7 @@ void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl
bool needs_range_check = !(opt_flags & MIR_IGNORE_RANGE_CHECK);
bool needs_null_check = !((cu_->disable_opt & (1 << kNullCheckElimination)) &&
(opt_flags & MIR_IGNORE_NULL_CHECK));
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenArrayObjPutCall<8>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
} else {
GenArrayObjPutCall<4>(this, needs_range_check, needs_null_check, rl_array, rl_index, rl_src);
@@ -894,14 +895,15 @@ void Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl
void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
RegLocation rl_method = LoadCurrMethod();
- RegStorage res_reg = AllocTemp();
+ DCHECK(!cu_->target64 || rl_method.reg.Is64Bit());
+ RegStorage res_reg = AllocTempRef();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
*cu_->dex_file,
type_idx)) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
type_idx, rl_method.reg, true);
} else {
@@ -936,7 +938,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
void Compile() {
GenerateTargetLabel();
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
rl_method_.reg, true);
} else {
@@ -1005,7 +1007,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
void Compile() {
GenerateTargetLabel();
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
m2l_->CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pResolveString),
r_method_, string_idx_, true);
} else {
@@ -1094,7 +1096,7 @@ static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_
* call Class::NewInstanceFromCode(type_idx, method);
*/
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenNewInstanceImpl<8>(this, cu_, type_idx, rl_dest);
} else {
GenNewInstanceImpl<4>(this, cu_, type_idx, rl_dest);
@@ -1103,7 +1105,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
void Mir2Lir::GenThrow(RegLocation rl_src) {
FlushAllRegs();
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pDeliverException), rl_src, true);
} else {
CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pDeliverException), rl_src, true);
@@ -1182,7 +1184,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
type_idx, true);
} else {
@@ -1207,7 +1209,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Not resolved
// Call out to helper, which will return resolved type in kRet0
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
} else {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
@@ -1247,7 +1249,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
}
} else {
if (cu_->instruction_set == kThumb2) {
- RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
+ RegStorage r_tgt = cu_->target64 ?
LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
LIR* it = nullptr;
@@ -1269,7 +1271,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
LoadConstant(rl_result.reg, 1); // assume true
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
- RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
+ RegStorage r_tgt = cu_->target64 ?
LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
@@ -1332,7 +1334,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
// InitializeTypeAndVerifyAccess(idx, method)
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
type_idx, TargetReg(kArg1), true);
} else {
@@ -1368,7 +1370,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
// Call out to helper, which will return resolved type in kArg0
// InitializeTypeFromCode(idx, method)
- if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
+ if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
m2l_->TargetReg(kArg1), true);
} else {
@@ -1405,7 +1407,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
m2l_->LoadRefDisp(m2l_->TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
m2l_->TargetReg(kArg1));
}
- if (Is64BitInstructionSet(m2l_->cu_->instruction_set)) {
+ if (m2l_->cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetReg(kArg2),
m2l_->TargetReg(kArg1), true);
} else {
@@ -1520,7 +1522,7 @@ static void GenShiftOpLongCall(Mir2Lir* mir_to_lir, Instruction::Code opcode, Re
void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift) {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenShiftOpLongCall<8>(this, opcode, rl_src1, rl_shift);
} else {
GenShiftOpLongCall<4>(this, opcode, rl_src1, rl_shift);
@@ -1653,7 +1655,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
if (!done) {
FlushAllRegs(); /* Send everything to home location */
LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
- RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
+ RegStorage r_tgt = cu_->target64 ?
CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) :
CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
@@ -1661,7 +1663,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
GenDivZeroCheck(TargetReg(kArg1));
}
// NOTE: callout here is not a safepoint.
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), false /* not a safepoint */);
} else {
CallHelper(r_tgt, QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), false /* not a safepoint */);
@@ -1924,7 +1926,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
FlushAllRegs(); /* Everything to home location. */
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
Clobber(TargetReg(kArg0));
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0), lit,
false);
} else {
@@ -2104,7 +2106,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc
void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
GenArithOpLongImpl<8>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
} else {
GenArithOpLongImpl<4>(this, cu_, opcode, rl_dest, rl_src1, rl_src2);
@@ -2156,7 +2158,7 @@ class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
m2l_->ResetRegPool();
m2l_->ResetDefTracking();
GenerateTargetLabel(kPseudoSuspendTarget);
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(8, pTestSuspend), true);
} else {
m2l_->CallRuntimeHelper(QUICK_ENTRYPOINT_OFFSET(4, pTestSuspend), true);
@@ -2215,7 +2217,7 @@ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
FlushAllRegs();
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pLockObject), rl_src, true);
} else {
CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pLockObject), rl_src, true);
@@ -2225,7 +2227,7 @@ void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
FlushAllRegs();
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject), rl_src, true);
} else {
CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject), rl_src, true);
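
Most of the gen_common.cc hunks are a mechanical substitution: every Is64BitInstructionSet(cu_->instruction_set) query becomes a read of the CompilationUnit's target64 flag, which is assumed to be precomputed from the instruction set. A reduced sketch of the equivalence the substitution relies on (simplified enum and struct, not the real ART headers):

    #include <cassert>

    enum InstructionSetSketch { kArmS, kArm64S, kX86S, kX86_64S };

    static bool Is64BitInstructionSetSketch(InstructionSetSketch isa) {
      return isa == kArm64S || isa == kX86_64S;
    }

    struct CompilationUnitSketch {
      InstructionSetSketch instruction_set;
      bool target64;  // assumed to cache Is64BitInstructionSet(instruction_set)
    };

    int main() {
      CompilationUnitSketch cu{kArm64S, true};
      // The substitution throughout this patch relies on the two being equivalent.
      assert(cu.target64 == Is64BitInstructionSetSketch(cu.instruction_set));
      return 0;
    }
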
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index a90a06e1ba..641579f354 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -156,7 +156,7 @@ void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_off
LoadValueDirectFixed(arg0, TargetReg(kArg0));
} else {
RegStorage r_tmp;
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::Solo64(TargetReg(kArg0).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
@@ -187,7 +187,7 @@ void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_
LoadValueDirectFixed(arg1, TargetReg(kArg1));
} else {
RegStorage r_tmp;
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
@@ -309,7 +309,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size>
LoadValueDirectWideFixed(arg1, r_tmp);
} else {
RegStorage r_tmp;
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kArg1), TargetReg(kArg2));
@@ -320,13 +320,13 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size>
} else {
RegStorage r_tmp;
if (arg0.fp) {
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg0).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg0), TargetReg(kFArg1));
}
} else {
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::Solo64(TargetReg(kArg0).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kArg0), TargetReg(kArg1));
@@ -334,7 +334,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size>
}
LoadValueDirectWideFixed(arg0, r_tmp);
if (arg1.wide == 0) {
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
} else {
LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
@@ -342,13 +342,13 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size>
} else {
RegStorage r_tmp;
if (arg1.fp) {
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::FloatSolo64(TargetReg(kFArg1).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kFArg2), TargetReg(kFArg3));
}
} else {
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::Solo64(TargetReg(kArg1).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
@@ -440,7 +440,7 @@ void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_si
LoadValueDirectFixed(arg2, TargetReg(kArg2));
} else {
RegStorage r_tmp;
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
r_tmp = RegStorage::Solo64(TargetReg(kArg2).GetReg());
} else {
r_tmp = RegStorage::MakeRegPair(TargetReg(kArg2), TargetReg(kArg3));
@@ -779,7 +779,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (Is64BitInstructionSet(cu->instruction_set)) {
+ if (cu->target64) {
ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeStaticTrampolineWithAccessCheck);
return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
} else {
@@ -792,7 +792,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (Is64BitInstructionSet(cu->instruction_set)) {
+ if (cu->target64) {
ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeDirectTrampolineWithAccessCheck);
return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
} else {
@@ -805,7 +805,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (Is64BitInstructionSet(cu->instruction_set)) {
+ if (cu->target64) {
ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeSuperTrampolineWithAccessCheck);
return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
} else {
@@ -818,7 +818,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (Is64BitInstructionSet(cu->instruction_set)) {
+ if (cu->target64) {
ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeVirtualTrampolineWithAccessCheck);
return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
} else {
@@ -832,7 +832,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
const MethodReference& target_method,
uint32_t unused, uintptr_t unused2,
uintptr_t unused3, InvokeType unused4) {
- if (Is64BitInstructionSet(cu->instruction_set)) {
+ if (cu->target64) {
ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeInterfaceTrampolineWithAccessCheck);
return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
} else {
@@ -1188,7 +1188,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0),
TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
@@ -1540,7 +1540,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
RegLocation rl_start = info->args[2]; // 3rd arg only present in III flavor of IndexOf.
LoadValueDirectFixed(rl_start, reg_start);
}
- RegStorage r_tgt = Is64BitInstructionSet(cu_->instruction_set) ?
+ RegStorage r_tgt = cu_->target64 ?
LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pIndexOf)) :
LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pIndexOf));
GenExplicitNullCheck(reg_ptr, info->opt_flags);
@@ -1581,7 +1581,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
LoadValueDirectFixed(rl_cmp, reg_cmp);
RegStorage r_tgt;
if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
} else {
r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
@@ -1598,7 +1598,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
OpReg(kOpBlx, r_tgt);
} else {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pStringCompareTo));
} else {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
@@ -1747,7 +1747,8 @@ void Mir2Lir::GenInvoke(CallInfo* info) {
DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
// TODO: Enable instrinsics for x86_64
// Temporary disable intrinsics for x86_64. We will enable them later step by step.
- if (cu_->instruction_set != kX86_64) {
+  // Temporarily disable intrinsics for Arm64. We will enable them later step by step.
+ if ((cu_->instruction_set != kX86_64) && (cu_->instruction_set != kArm64)) {
if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
->GenIntrinsic(this, info)) {
return;
@@ -1850,7 +1851,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
}
} else {
// TODO: Extract?
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
call_inst = GenInvokeNoInlineCall<8>(this, info->type);
} else {
call_inst = GenInvokeNoInlineCall<4>(this, info->type);
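
The gen_invoke.cc hunks generalize a rule previously keyed on kX86_64 alone: on any 64-bit target a wide runtime-helper argument travels in a single 64-bit register, while 32-bit targets still assemble a low/high register pair. Schematically, with illustrative names only:

    #include <cstdio>

    // Illustrative only: how a wide argument is placed for a runtime helper call.
    static const char* WideArgPlacementSketch(bool target64) {
      return target64 ? "Solo64(kArg0)"                 // one 64-bit register
                      : "MakeRegPair(kArg0, kArg1)";    // low/high pair on 32-bit targets
    }

    int main() {
      std::printf("64-bit target: %s\n", WideArgPlacementSketch(true));
      std::printf("32-bit target: %s\n", WideArgPlacementSketch(false));
      return 0;
    }
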
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 40205eabd6..1fc416301c 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -66,7 +66,7 @@ void Mir2Lir::LockArg(int in_position, bool wide) {
}
}
-// TODO: needs revisit for 64-bit.
+// TODO: simplify when 32-bit targets go hard-float.
RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
@@ -87,10 +87,11 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide)
offset += sizeof(uint64_t);
}
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
RegStorage reg_arg = GetArgMappingToPhysicalReg(in_position);
if (!reg_arg.Valid()) {
- RegStorage new_reg = wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
+ RegStorage new_reg =
+ wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
LoadBaseDisp(TargetReg(kSp), offset, new_reg, wide ? k64 : k32);
return new_reg;
} else {
@@ -159,6 +160,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide)
return reg_arg;
}
+// TODO: simplify when 32-bit targets go hard-float.
void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
@@ -186,7 +188,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
Load32Disp(TargetReg(kSp), offset, rl_dest.reg);
}
} else {
- if (cu_->instruction_set == kX86_64) {
+ if (cu_->target64) {
RegStorage reg = GetArgMappingToPhysicalReg(in_position);
if (reg.Valid()) {
OpRegCopy(rl_dest.reg, reg);
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index cae59c88c1..5bb0ee04d4 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -473,14 +473,14 @@ RegStorage Mir2Lir::AllocLiveReg(int s_reg, int reg_class, bool wide) {
reg = FindLiveReg(wide ? reg_pool_->dp_regs_ : reg_pool_->sp_regs_, s_reg);
}
if (!reg.Valid() && (reg_class != kFPReg)) {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
reg = FindLiveReg(wide ? reg_pool_->core64_regs_ : reg_pool_->core_regs_, s_reg);
} else {
reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
}
}
if (reg.Valid()) {
- if (wide && !reg.IsFloat() && !Is64BitInstructionSet(cu_->instruction_set)) {
+ if (wide && !reg.IsFloat() && !cu_->target64) {
// Only allow reg pairs for core regs on 32-bit targets.
RegStorage high_reg = FindLiveReg(reg_pool_->core_regs_, s_reg + 1);
if (high_reg.Valid()) {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index f5fce34f2b..dd5dab290d 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -175,7 +175,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
}
NewLIR2(kX86PcRelAdr, rs_rX86_ARG1.GetReg(), WrapPointer(tab_rec));
NewLIR2(Gen64Bit() ? kX86Add64RR : kX86Add32RR, rs_rX86_ARG1.GetReg(), rs_rX86_ARG2.GetReg());
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), rs_rX86_ARG0,
rs_rX86_ARG1, true);
} else {
@@ -185,7 +185,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
}
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = Is64BitInstructionSet(cu_->instruction_set) ?
+ int ex_offset = cu_->target64 ?
Thread::ExceptionOffset<8>().Int32Value() :
Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
@@ -201,7 +201,7 @@ void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
- int ct_offset = Is64BitInstructionSet(cu_->instruction_set) ?
+ int ct_offset = cu_->target64 ?
Thread::CardTableOffset<8>().Int32Value() :
Thread::CardTableOffset<4>().Int32Value();
if (Gen64Bit()) {
@@ -255,7 +255,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
m2l_->ClobberCallerSave();
// Assumes codegen and target are in thumb2 mode.
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow),
false /* MarkSafepointPC */, false /* UseLink */);
} else {
@@ -276,7 +276,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
// in case a signal comes in that's not using an alternate signal stack and the large frame may
// have moved us outside of the reserved area at the end of the stack.
// cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
} else {
OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index f6f06170bb..61623d0051 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -49,7 +49,7 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmodf), rl_src1, rl_src2,
false);
} else {
@@ -111,7 +111,7 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmod), rl_src1, rl_src2,
false);
} else {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 05b5e4354d..69075c03e1 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -991,7 +991,7 @@ void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index,
}
// Load array length to kArg1.
m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
new_index, m2l_->TargetReg(kArg1), true);
} else {
@@ -1031,7 +1031,7 @@ void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
// Load array length to kArg1.
m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
} else {
@@ -1054,7 +1054,7 @@ void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
// Test suspend flag, return target of taken suspend branch
LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
OpTlsCmp(Thread::ThreadFlagsOffset<8>(), 0);
} else {
OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
@@ -2311,7 +2311,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// Caller function returns Class* in kArg0.
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeTypeAndVerifyAccess),
type_idx, true);
} else {
@@ -2337,7 +2337,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
// Need to test presence of type in dex cache at runtime.
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Type is not resolved. Call out to helper, which will return resolved type in kRet0/kArg0.
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx, true);
} else {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
@@ -2375,7 +2375,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
- if (Is64BitInstructionSet(cu_->instruction_set)) {
+ if (cu_->target64) {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
} else {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));