Diffstat (limited to 'compiler/dex')
-rw-r--r--  compiler/dex/compiler_enums.h             8
-rw-r--r--  compiler/dex/quick/arm/arm_lir.h         25
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h    138
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc         9
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc    397
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc     6
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc          5
-rw-r--r--  compiler/dex/quick/mir_to_lir.h           8
-rw-r--r--  compiler/dex/quick/quick_compiler.cc     26
9 files changed, 569 insertions, 53 deletions
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index c6c5ca7d4f..beeb3adb72 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -60,6 +60,14 @@ enum SpecialTargetRegister {
kFArg5,
kFArg6,
kFArg7,
+ kFArg8,
+ kFArg9,
+ kFArg10,
+ kFArg11,
+ kFArg12,
+ kFArg13,
+ kFArg14,
+ kFArg15,
kRet0,
kRet1,
kInvokeTgt,
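
The new kFArg8 through kFArg15 enumerators must stay consecutive: the wide TargetReg() overload later in this patch names a register pair as (reg, reg + 1), and mir_to_lir.h asserts the ordering at compile time. A minimal standalone sketch of that contiguity requirement, using illustrative names and values rather than the real ART enum:

    // Sketch only: a stand-in for the kFArg portion of SpecialTargetRegister.
    enum SpecialTargetRegisterSketch {
      kFA0, kFA1, kFA2, kFA3, kFA4, kFA5, kFA6, kFA7,
      kFA8, kFA9, kFA10, kFA11, kFA12, kFA13, kFA14, kFA15
    };
    // The wide path forms a pair from reg and reg + 1, so adjacency is required.
    static_assert(kFA15 == kFA0 + 15, "kFArg enumerators must be consecutive");
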
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index d935bc30c4..36cb7a4efc 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -297,19 +297,20 @@ constexpr RegStorage rs_dr30(RegStorage::kValid | dr30);
constexpr RegStorage rs_dr31(RegStorage::kValid | dr31);
#endif
-// RegisterLocation templates return values (r0, or r0/r1).
-const RegLocation arm_loc_c_return
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_wide
+// RegisterLocation templates for return values (r0, r0/r1, s0, or d0).
+// Note: The return locations are shared between quick code and the quick helpers. They follow
+// the quick ABI; the quick helper assembly routines need to handle any ABI differences.
+const RegLocation arm_loc_c_return =
+ {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rs_r0, INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_wide =
{kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_float
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_double
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
- RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
+ RegStorage::MakeRegPair(rs_r0, rs_r1), INVALID_SREG, INVALID_SREG};
+const RegLocation arm_loc_c_return_float = kArm32QuickCodeUseSoftFloat
+ ? arm_loc_c_return
+ : RegLocation({kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, rs_fr0, INVALID_SREG, INVALID_SREG});
+const RegLocation arm_loc_c_return_double = kArm32QuickCodeUseSoftFloat
+ ? arm_loc_c_return_wide
+ : RegLocation({kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, rs_dr0, INVALID_SREG, INVALID_SREG});
enum ArmShiftEncodings {
kArmLsl = 0x0,
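
The four return locations above are now selected once, at compile time, by kArm32QuickCodeUseSoftFloat: under soft-float the float and double results alias the core returns (r0 and r0/r1), while under hard-float they move to s0 and d0. A reduced sketch of that selection pattern, with a simplified stand-in for RegLocation:

    #include <cstdio>

    // Simplified stand-in; the real type is RegLocation backed by RegStorage.
    struct RetLoc { bool fp; bool wide; int reg_num; };

    constexpr bool kUseSoftFloat = false;  // mirrors kArm32QuickCodeUseSoftFloat

    constexpr RetLoc kRetCore{false, false, 0};  // r0
    constexpr RetLoc kRetFloat =
        kUseSoftFloat ? kRetCore : RetLoc{true, false, 0};  // s0 under hard-float

    int main() {
      std::printf("float return: %c%d\n", kRetFloat.fp ? 's' : 'r', kRetFloat.reg_num);
      return 0;
    }
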
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 6fd29f25dc..442c4fcec6 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -25,6 +25,64 @@
namespace art {
class ArmMir2Lir FINAL : public Mir2Lir {
+ protected:
+ // TODO: Consolidate hard float target support.
+ // InToRegStorageMapper and InToRegStorageMapping can be shared with all backends.
+  // Base class used to get the RegStorage for the next argument.
+ class InToRegStorageMapper {
+ public:
+ virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide) = 0;
+ virtual ~InToRegStorageMapper() {
+ }
+ };
+
+  // Derived class for the ARM backend.
+ class InToRegStorageArmMapper FINAL : public InToRegStorageMapper {
+ public:
+ InToRegStorageArmMapper()
+ : cur_core_reg_(0), cur_fp_reg_(0), cur_fp_double_reg_(0) {
+ }
+
+ virtual ~InToRegStorageArmMapper() {
+ }
+
+ RegStorage GetNextReg(bool is_double_or_float, bool is_wide) OVERRIDE;
+
+ private:
+ uint32_t cur_core_reg_;
+ uint32_t cur_fp_reg_;
+ uint32_t cur_fp_double_reg_;
+ };
+
+  // Class that maps an argument to its RegStorage. The mapping object is initialized by a mapper.
+ class InToRegStorageMapping FINAL {
+ public:
+ InToRegStorageMapping()
+ : max_mapped_in_(0), is_there_stack_mapped_(false), initialized_(false) {
+ }
+
+ int GetMaxMappedIn() const {
+ return max_mapped_in_;
+ }
+
+ bool IsThereStackMapped() const {
+ return is_there_stack_mapped_;
+ }
+
+ bool IsInitialized() const {
+ return initialized_;
+ }
+
+ void Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper);
+ RegStorage Get(int in_position) const;
+
+ private:
+ std::map<int, RegStorage> mapping_;
+ int max_mapped_in_;
+ bool is_there_stack_mapped_;
+ bool initialized_;
+ };
+
public:
ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
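
How the two classes fit together: Initialize() walks the incoming argument locations once, asking the mapper for a register matching each argument's (fp, wide) shape, and Get() then answers per-position queries, with an invalid register meaning the argument lives on the stack. A reduced sketch of the protocol, with plain ints standing in for RegLocation and RegStorage:

    #include <map>
    #include <utility>
    #include <vector>

    struct Reg {
      int id;
      bool Valid() const { return id >= 0; }
      static Reg Invalid() { return Reg{-1}; }
    };

    // Stand-in mapper: hands out three core registers (think r1..r3), then stack.
    struct CoreMapper {
      int next = 1;
      Reg GetNextReg(bool /*is_fp*/, bool /*is_wide*/) {
        return next <= 3 ? Reg{next++} : Reg::Invalid();
      }
    };

    struct InMapping {
      std::map<int, Reg> mapping_;
      bool is_there_stack_mapped_ = false;

      // arg_shapes[i] = {is_fp, is_wide} for the i-th incoming argument.
      void Initialize(const std::vector<std::pair<bool, bool>>& arg_shapes,
                      CoreMapper* mapper) {
        for (int i = 0; i < static_cast<int>(arg_shapes.size()); ++i) {
          Reg r = mapper->GetNextReg(arg_shapes[i].first, arg_shapes[i].second);
          if (r.Valid()) {
            mapping_[i] = r;
          } else {
            is_there_stack_mapped_ = true;  // spills to the out area
          }
        }
      }

      Reg Get(int pos) const {
        auto it = mapping_.find(pos);
        return it != mapping_.end() ? it->second : Reg::Invalid();
      }
    };
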
@@ -47,15 +105,30 @@ class ArmMir2Lir FINAL : public Mir2Lir {
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg);
- RegStorage GetArgMappingToPhysicalReg(int arg_num);
- RegLocation GetReturnAlt();
- RegLocation GetReturnWideAlt();
- RegLocation LocCReturn();
- RegLocation LocCReturnRef();
- RegLocation LocCReturnDouble();
- RegLocation LocCReturnFloat();
- RegLocation LocCReturnWide();
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) OVERRIDE {
+ if (wide_kind == kWide) {
+ DCHECK((kArg0 <= reg && reg < kArg3) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
+ RegStorage ret_reg = RegStorage::MakeRegPair(TargetReg(reg),
+ TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
+ if (ret_reg.IsFloat()) {
+      // Regard the pair as a double, to be consistent with register allocation.
+ ret_reg = As64BitFloatReg(ret_reg);
+ }
+ return ret_reg;
+ } else {
+ return TargetReg(reg);
+ }
+ }
+
+ RegStorage GetArgMappingToPhysicalReg(int arg_num) OVERRIDE;
+ RegLocation GetReturnAlt() OVERRIDE;
+ RegLocation GetReturnWideAlt() OVERRIDE;
+ RegLocation LocCReturn() OVERRIDE;
+ RegLocation LocCReturnRef() OVERRIDE;
+ RegLocation LocCReturnDouble() OVERRIDE;
+ RegLocation LocCReturnFloat() OVERRIDE;
+ RegLocation LocCReturnWide() OVERRIDE;
ResourceMask GetRegMaskCommon(const RegStorage& reg) const OVERRIDE;
void AdjustSpillMask();
void ClobberCallerSave();
@@ -210,6 +283,19 @@ class ArmMir2Lir FINAL : public Mir2Lir {
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
size_t GetInstructionOffset(LIR* lir);
+ int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this) OVERRIDE;
+ int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
+ NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx,
+ uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
+ bool skip_this) OVERRIDE;
+
private:
void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
void GenMulLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -226,10 +312,10 @@ class ArmMir2Lir FINAL : public Mir2Lir {
RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
bool is_div, int flags) OVERRIDE;
RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) OVERRIDE;
- typedef struct {
+ struct EasyMultiplyOp {
OpKind op;
uint32_t shift;
- } EasyMultiplyOp;
+ };
bool GetEasyMultiplyOp(int lit, EasyMultiplyOp* op);
bool GetEasyMultiplyTwoOps(int lit, EasyMultiplyOp* ops);
void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops);
@@ -239,6 +325,36 @@ class ArmMir2Lir FINAL : public Mir2Lir {
static constexpr ResourceMask EncodeArmRegFpcsList(int reg_list);
ArenaVector<LIR*> call_method_insns_;
+
+ /**
+ * @brief Given float register pair, returns Solo64 float register.
+ * @param reg #RegStorage containing a float register pair (e.g. @c s2 and @c s3).
+   * @return A Solo64 float register mapping to the register pair (e.g. @c d1).
+ */
+ static RegStorage As64BitFloatReg(RegStorage reg) {
+ DCHECK(reg.IsFloat());
+
+ RegStorage low = reg.GetLow();
+ RegStorage high = reg.GetHigh();
+ DCHECK((low.GetRegNum() % 2 == 0) && (low.GetRegNum() + 1 == high.GetRegNum()));
+
+ return RegStorage::FloatSolo64(low.GetRegNum() / 2);
+ }
+
+ /**
+ * @brief Given Solo64 float register, returns float register pair.
+ * @param reg #RegStorage containing a Solo64 float register (e.g. @c d1).
+   * @return A float register pair mapping to the Solo64 float register (e.g. @c s2 and @c s3).
+ */
+ static RegStorage As64BitFloatRegPair(RegStorage reg) {
+ DCHECK(reg.IsDouble() && reg.Is64BitSolo());
+
+ int reg_num = reg.GetRegNum();
+ return RegStorage::MakeRegPair(RegStorage::FloatSolo32(reg_num * 2),
+ RegStorage::FloatSolo32(reg_num * 2 + 1));
+ }
+
+ InToRegStorageMapping in_to_reg_storage_mapping_;
};
} // namespace art
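
A worked instance of the two helpers above: on ARM VFP, double register dN overlaps singles s2N and s2N+1, so d1 is exactly the (s2, s3) pair and both conversions reduce to halving or doubling the register number. The arithmetic in isolation, with plain ints in place of RegStorage:

    #include <cassert>
    #include <utility>

    // (s_low, s_high) pair -> dN; e.g. (s2, s3) -> d1.
    int PairToDoubleNum(int s_low, int s_high) {
      assert(s_low % 2 == 0 && s_high == s_low + 1);
      return s_low / 2;
    }

    // dN -> (s_low, s_high) pair; e.g. d1 -> (s2, s3).
    std::pair<int, int> DoubleToPairNums(int d) {
      return std::make_pair(d * 2, d * 2 + 1);
    }
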
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 9742243632..8e08f5fb9d 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -442,6 +442,15 @@ void ArmMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
bool src_fp = r_src.IsFloat();
DCHECK(r_dest.Is64Bit());
DCHECK(r_src.Is64Bit());
+  // Note: If the register comes from the register allocator, it should never be a pair.
+  // But some functions in Mir2Lir assume 64-bit registers are 32-bit register pairs.
+ // TODO: Rework Mir2Lir::LoadArg() and Mir2Lir::LoadArgDirect().
+ if (dest_fp && r_dest.IsPair()) {
+ r_dest = As64BitFloatReg(r_dest);
+ }
+ if (src_fp && r_src.IsPair()) {
+ r_src = As64BitFloatReg(r_src);
+ }
if (dest_fp) {
if (src_fp) {
OpRegCopy(r_dest, r_src);
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index dd8f7fe3d8..7100a285a6 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -89,7 +89,7 @@ RegLocation ArmMir2Lir::LocCReturnDouble() {
// Return a target-dependent special register.
RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
- RegStorage res_reg = RegStorage::InvalidReg();
+ RegStorage res_reg;
switch (reg) {
case kSelf: res_reg = rs_rARM_SELF; break;
#ifdef ARM_R4_SUSPEND_FLAG
@@ -104,10 +104,22 @@ RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
case kArg1: res_reg = rs_r1; break;
case kArg2: res_reg = rs_r2; break;
case kArg3: res_reg = rs_r3; break;
- case kFArg0: res_reg = rs_r0; break;
- case kFArg1: res_reg = rs_r1; break;
- case kFArg2: res_reg = rs_r2; break;
- case kFArg3: res_reg = rs_r3; break;
+ case kFArg0: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r0 : rs_fr0; break;
+ case kFArg1: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r1 : rs_fr1; break;
+ case kFArg2: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r2 : rs_fr2; break;
+ case kFArg3: res_reg = kArm32QuickCodeUseSoftFloat ? rs_r3 : rs_fr3; break;
+ case kFArg4: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr4; break;
+ case kFArg5: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr5; break;
+ case kFArg6: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr6; break;
+ case kFArg7: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr7; break;
+ case kFArg8: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr8; break;
+ case kFArg9: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr9; break;
+ case kFArg10: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr10; break;
+ case kFArg11: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr11; break;
+ case kFArg12: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr12; break;
+ case kFArg13: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr13; break;
+ case kFArg14: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr14; break;
+ case kFArg15: res_reg = kArm32QuickCodeUseSoftFloat ? RegStorage::InvalidReg() : rs_fr15; break;
case kRet0: res_reg = rs_r0; break;
case kRet1: res_reg = rs_r1; break;
case kInvokeTgt: res_reg = rs_rARM_LR; break;
@@ -119,20 +131,6 @@ RegStorage ArmMir2Lir::TargetReg(SpecialTargetRegister reg) {
return res_reg;
}
-RegStorage ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
- // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
- switch (arg_num) {
- case 0:
- return rs_r1;
- case 1:
- return rs_r2;
- case 2:
- return rs_r3;
- default:
- return RegStorage::InvalidReg();
- }
-}
-
/*
* Decode the register id.
*/
@@ -718,6 +716,32 @@ void ArmMir2Lir::LockCallTemps() {
LockTemp(rs_r1);
LockTemp(rs_r2);
LockTemp(rs_r3);
+ if (!kArm32QuickCodeUseSoftFloat) {
+ LockTemp(rs_fr0);
+ LockTemp(rs_fr1);
+ LockTemp(rs_fr2);
+ LockTemp(rs_fr3);
+ LockTemp(rs_fr4);
+ LockTemp(rs_fr5);
+ LockTemp(rs_fr6);
+ LockTemp(rs_fr7);
+ LockTemp(rs_fr8);
+ LockTemp(rs_fr9);
+ LockTemp(rs_fr10);
+ LockTemp(rs_fr11);
+ LockTemp(rs_fr12);
+ LockTemp(rs_fr13);
+ LockTemp(rs_fr14);
+ LockTemp(rs_fr15);
+ LockTemp(rs_dr0);
+ LockTemp(rs_dr1);
+ LockTemp(rs_dr2);
+ LockTemp(rs_dr3);
+ LockTemp(rs_dr4);
+ LockTemp(rs_dr5);
+ LockTemp(rs_dr6);
+ LockTemp(rs_dr7);
+ }
}
/* To be used when explicitly managing register use */
@@ -726,6 +750,32 @@ void ArmMir2Lir::FreeCallTemps() {
FreeTemp(rs_r1);
FreeTemp(rs_r2);
FreeTemp(rs_r3);
+ if (!kArm32QuickCodeUseSoftFloat) {
+ FreeTemp(rs_fr0);
+ FreeTemp(rs_fr1);
+ FreeTemp(rs_fr2);
+ FreeTemp(rs_fr3);
+ FreeTemp(rs_fr4);
+ FreeTemp(rs_fr5);
+ FreeTemp(rs_fr6);
+ FreeTemp(rs_fr7);
+ FreeTemp(rs_fr8);
+ FreeTemp(rs_fr9);
+ FreeTemp(rs_fr10);
+ FreeTemp(rs_fr11);
+ FreeTemp(rs_fr12);
+ FreeTemp(rs_fr13);
+ FreeTemp(rs_fr14);
+ FreeTemp(rs_fr15);
+ FreeTemp(rs_dr0);
+ FreeTemp(rs_dr1);
+ FreeTemp(rs_dr2);
+ FreeTemp(rs_dr3);
+ FreeTemp(rs_dr4);
+ FreeTemp(rs_dr5);
+ FreeTemp(rs_dr6);
+ FreeTemp(rs_dr7);
+ }
}
RegStorage ArmMir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
@@ -847,4 +897,313 @@ void ArmMir2Lir::InstallLiteralPools() {
Mir2Lir::InstallLiteralPools();
}
+RegStorage ArmMir2Lir::InToRegStorageArmMapper::GetNextReg(bool is_double_or_float, bool is_wide) {
+ const RegStorage coreArgMappingToPhysicalReg[] =
+ {rs_r1, rs_r2, rs_r3};
+ const int coreArgMappingToPhysicalRegSize = arraysize(coreArgMappingToPhysicalReg);
+ const RegStorage fpArgMappingToPhysicalReg[] =
+ {rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
+ rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15};
+ const uint32_t fpArgMappingToPhysicalRegSize = arraysize(fpArgMappingToPhysicalReg);
+ COMPILE_ASSERT(fpArgMappingToPhysicalRegSize % 2 == 0, knum_of_fp_arg_regs_not_even);
+
+ if (kArm32QuickCodeUseSoftFloat) {
+ is_double_or_float = false; // Regard double as long, float as int.
+ is_wide = false; // Map long separately.
+ }
+
+ RegStorage result = RegStorage::InvalidReg();
+ if (is_double_or_float) {
+ // TODO: Remove "cur_fp_double_reg_ % 2 != 0" when we return double as double.
+ if (is_wide || cur_fp_double_reg_ % 2 != 0) {
+ cur_fp_double_reg_ = std::max(cur_fp_double_reg_, RoundUp(cur_fp_reg_, 2));
+ if (cur_fp_double_reg_ < fpArgMappingToPhysicalRegSize) {
+        // TODO: Replace with the following code in this branch once FlushIns()
+        // supports 64-bit registers.
+ // result = RegStorage::MakeRegPair(fpArgMappingToPhysicalReg[cur_fp_double_reg_],
+ // fpArgMappingToPhysicalReg[cur_fp_double_reg_ + 1]);
+ // result = As64BitFloatReg(result);
+ // cur_fp_double_reg_ += 2;
+ result = fpArgMappingToPhysicalReg[cur_fp_double_reg_];
+ cur_fp_double_reg_++;
+ }
+ } else {
+ // TODO: Remove the check when we return double as double.
+ DCHECK_EQ(cur_fp_double_reg_ % 2, 0U);
+ if (cur_fp_reg_ % 2 == 0) {
+ cur_fp_reg_ = std::max(cur_fp_double_reg_, cur_fp_reg_);
+ }
+ if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
+ result = fpArgMappingToPhysicalReg[cur_fp_reg_];
+ cur_fp_reg_++;
+ }
+ }
+ } else {
+ if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
+ result = coreArgMappingToPhysicalReg[cur_core_reg_++];
+      // TODO: Enable the following code once FlushIns() supports 64-bit registers.
+ // if (is_wide && cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
+ // result = RegStorage::MakeRegPair(result, coreArgMappingToPhysicalReg[cur_core_reg_++]);
+ // }
+ }
+ }
+ return result;
+}
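
The bookkeeping above follows the AAPCS VFP convention: doubles take an even-aligned slot in the s-register file, and later singles may back-fill an odd slot that double alignment skipped. As the TODOs note, a double is currently handed out as two single-precision halves, hence the one-step increments. A standalone sketch of the same allocation logic, with s-register indices in place of RegStorage:

    #include <algorithm>
    #include <cstdio>

    struct FpArgAllocator {
      unsigned cur_fp_reg = 0;         // next single-precision candidate
      unsigned cur_fp_double_reg = 0;  // next double-sequence candidate, in s-units

      static unsigned RoundUp2(unsigned x) { return (x + 1u) & ~1u; }

      // Returns an s-register index, or -1 once the 16 fp arg registers are used.
      int Next(bool is_wide) {
        if (is_wide || cur_fp_double_reg % 2 != 0) {
          cur_fp_double_reg = std::max(cur_fp_double_reg, RoundUp2(cur_fp_reg));
          return cur_fp_double_reg < 16u ? static_cast<int>(cur_fp_double_reg++) : -1;
        }
        if (cur_fp_reg % 2 == 0) {
          cur_fp_reg = std::max(cur_fp_double_reg, cur_fp_reg);
        }
        return cur_fp_reg < 16u ? static_cast<int>(cur_fp_reg++) : -1;
      }
    };

    int main() {
      FpArgAllocator a;
      // Shorty (F, D, F): float -> s0; double -> s2 and s3 (even-aligned,
      // one call per 32-bit half); the next float back-fills s1.
      int f1 = a.Next(false);   // 0 -> s0
      int d_lo = a.Next(true);  // 2 -> s2
      int d_hi = a.Next(true);  // 3 -> s3
      int f2 = a.Next(false);   // 1 -> s1 (back-fill)
      std::printf("%d %d %d %d\n", f1, d_lo, d_hi, f2);
      return 0;
    }
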
+
+RegStorage ArmMir2Lir::InToRegStorageMapping::Get(int in_position) const {
+ DCHECK(IsInitialized());
+ auto res = mapping_.find(in_position);
+ return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
+}
+
+void ArmMir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
+ InToRegStorageMapper* mapper) {
+ DCHECK(mapper != nullptr);
+ max_mapped_in_ = -1;
+ is_there_stack_mapped_ = false;
+ for (int in_position = 0; in_position < count; in_position++) {
+ RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
+ arg_locs[in_position].wide);
+ if (reg.Valid()) {
+ mapping_[in_position] = reg;
+      // TODO: Enable the following code once FlushIns() supports 64-bit argument registers.
+ // if (arg_locs[in_position].wide) {
+ // if (reg.Is32Bit()) {
+ // // As it is a split long, the hi-part is on stack.
+ // is_there_stack_mapped_ = true;
+ // }
+ // // We covered 2 v-registers, so skip the next one
+ // in_position++;
+ // }
+ max_mapped_in_ = std::max(max_mapped_in_, in_position);
+ } else {
+ is_there_stack_mapped_ = true;
+ }
+ }
+ initialized_ = true;
+}
+
+// TODO: Should be able to return long and double registers.
+// Need to check some common code first, as this will break some of its assumptions.
+RegStorage ArmMir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+ if (!in_to_reg_storage_mapping_.IsInitialized()) {
+ int start_vreg = mir_graph_->GetFirstInVR();
+ RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
+
+ InToRegStorageArmMapper mapper;
+ in_to_reg_storage_mapping_.Initialize(arg_locs, mir_graph_->GetNumOfInVRs(), &mapper);
+ }
+ return in_to_reg_storage_mapping_.Get(arg_num);
+}
+
+int ArmMir2Lir::GenDalvikArgsNoRange(CallInfo* info,
+ int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this) {
+ if (kArm32QuickCodeUseSoftFloat) {
+ return Mir2Lir::GenDalvikArgsNoRange(info, call_state, pcrLabel, next_call_insn, target_method,
+ vtable_idx, direct_code, direct_method, type, skip_this);
+ } else {
+ return GenDalvikArgsRange(info, call_state, pcrLabel, next_call_insn, target_method, vtable_idx,
+ direct_code, direct_method, type, skip_this);
+ }
+}
+
+int ArmMir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
+ LIR** pcrLabel, NextCallInsn next_call_insn,
+ const MethodReference& target_method,
+ uint32_t vtable_idx, uintptr_t direct_code,
+ uintptr_t direct_method, InvokeType type, bool skip_this) {
+ if (kArm32QuickCodeUseSoftFloat) {
+ return Mir2Lir::GenDalvikArgsRange(info, call_state, pcrLabel, next_call_insn, target_method,
+ vtable_idx, direct_code, direct_method, type, skip_this);
+ }
+
+  // TODO: Rework the implementation when argument registers can be long or double.
+
+ /* If no arguments, just return */
+ if (info->num_arg_words == 0) {
+ return call_state;
+ }
+
+ const int start_index = skip_this ? 1 : 0;
+
+ InToRegStorageArmMapper mapper;
+ InToRegStorageMapping in_to_reg_storage_mapping;
+ in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
+ const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
+ int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + 1);
+
+ // First of all, check whether it makes sense to use bulk copying.
+ // Bulk copying is done only for the range case.
+  // TODO: Make this threshold a named constant instead of 2.
+ if (info->is_range && regs_left_to_pass_via_stack >= 2) {
+    // Scan the rest of the args; if an arg is in a physical register, flush it to memory.
+ for (int next_arg = last_mapped_in + 1; next_arg < info->num_arg_words;) {
+ RegLocation loc = info->args[next_arg];
+ if (loc.wide) {
+ // TODO: Only flush hi-part.
+ if (loc.high_word) {
+ loc = info->args[--next_arg];
+ }
+ loc = UpdateLocWide(loc);
+ if (loc.location == kLocPhysReg) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ }
+ next_arg += 2;
+ } else {
+ loc = UpdateLoc(loc);
+ if (loc.location == kLocPhysReg) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ if (loc.ref) {
+ StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
+ } else {
+ StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
+ kNotVolatile);
+ }
+ }
+ next_arg++;
+ }
+ }
+
+ // The rest can be copied together
+ int start_offset = SRegOffset(info->args[last_mapped_in + 1].s_reg_low);
+ int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + 1,
+ cu_->instruction_set);
+
+ int current_src_offset = start_offset;
+ int current_dest_offset = outs_offset;
+
+    // Only Dalvik regs are accessed in this loop; no next_call_insn() calls.
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ while (regs_left_to_pass_via_stack > 0) {
+ /*
+ * TODO: Improve by adding block copy for large number of arguments. This
+ * should be done, if possible, as a target-depending helper. For now, just
+ * copy a Dalvik vreg at a time.
+ */
+ // Moving 32-bits via general purpose register.
+ size_t bytes_to_move = sizeof(uint32_t);
+
+ // Instead of allocating a new temp, simply reuse one of the registers being used
+ // for argument passing.
+ RegStorage temp = TargetReg(kArg3, kNotWide);
+
+ // Now load the argument VR and store to the outs.
+ Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
+ Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);
+
+ current_src_offset += bytes_to_move;
+ current_dest_offset += bytes_to_move;
+ regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
+ }
+ DCHECK_EQ(regs_left_to_pass_via_stack, 0);
+ }
+
+  // Now handle the arguments that were not mapped to registers, if any.
+ if (in_to_reg_storage_mapping.IsThereStackMapped()) {
+ RegStorage regWide = TargetReg(kArg2, kWide);
+ for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
+ RegLocation rl_arg = info->args[i];
+ rl_arg = UpdateRawLoc(rl_arg);
+ RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ // TODO: Only pass split wide hi-part via stack.
+ if (!reg.Valid() || rl_arg.wide) {
+ int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
+
+ {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ if (rl_arg.wide) {
+ if (rl_arg.location == kLocPhysReg) {
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ } else {
+ LoadValueDirectWideFixed(rl_arg, regWide);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ }
+ } else {
+ if (rl_arg.location == kLocPhysReg) {
+ if (rl_arg.ref) {
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
+ } else {
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ }
+ } else {
+ if (rl_arg.ref) {
+ RegStorage regSingle = TargetReg(kArg2, kRef);
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreRefDisp(TargetPtrReg(kSp), out_offset, regSingle, kNotVolatile);
+ } else {
+ RegStorage regSingle = TargetReg(kArg2, kNotWide);
+ LoadValueDirectFixed(rl_arg, regSingle);
+ StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
+ }
+ }
+ }
+ }
+
+ call_state = next_call_insn(cu_, info, call_state, target_method,
+ vtable_idx, direct_code, direct_method, type);
+ }
+ if (rl_arg.wide) {
+ i++;
+ }
+ }
+ }
+
+ // Finish with mapped registers
+ for (int i = start_index; i <= last_mapped_in; i++) {
+ RegLocation rl_arg = info->args[i];
+ rl_arg = UpdateRawLoc(rl_arg);
+ RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ if (reg.Valid()) {
+ if (reg.Is64Bit()) {
+ LoadValueDirectWideFixed(rl_arg, reg);
+ } else {
+          // TODO: A split long should be the only case we need to care about.
+ if (rl_arg.wide) {
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ int high_word = rl_arg.high_word ? 1 : 0;
+ rl_arg = high_word ? info->args[i - 1] : rl_arg;
+ if (rl_arg.location == kLocPhysReg) {
+ RegStorage rs_arg = rl_arg.reg;
+ if (rs_arg.IsDouble() && rs_arg.Is64BitSolo()) {
+ rs_arg = As64BitFloatRegPair(rs_arg);
+ }
+ RegStorage rs_arg_low = rs_arg.GetLow();
+ RegStorage rs_arg_high = rs_arg.GetHigh();
+ OpRegCopy(reg, high_word ? rs_arg_high : rs_arg_low);
+ } else {
+ Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + high_word), reg);
+ }
+ } else {
+ LoadValueDirectFixed(rl_arg, reg);
+ }
+ }
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
+ }
+ if (reg.Is64Bit()) {
+ i++;
+ }
+ }
+
+ call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
+ direct_code, direct_method, type);
+ if (pcrLabel) {
+ if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
+ *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
+ } else {
+ *pcrLabel = nullptr;
+ // In lieu of generating a check for kArg1 being null, we need to
+ // perform a load when doing implicit checks.
+ RegStorage tmp = AllocTemp();
+ Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
+ MarkPossibleNullPointerException(info->opt_flags);
+ FreeTemp(tmp);
+ }
+ }
+ return call_state;
+}
+
} // namespace art
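
In the range case above, arguments beyond the register-mapped ones are moved with a word-at-a-time copy from the caller's in area to the outs area. A reduced sketch of that copy loop, with plain memory in place of the scratch-register LIR (the real code issues Load32Disp/Store32Disp through kArg3):

    #include <cstdint>

    // Copy `words` 32-bit Dalvik vregs from the in area to the outs area,
    // both expressed as word offsets from the stack pointer.
    void CopyStackArgs(uint32_t* sp, int src_word, int dst_word, int words) {
      while (words > 0) {
        sp[dst_word++] = sp[src_word++];
        --words;
      }
    }
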
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 09acf4cf17..ce2de65abf 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1007,6 +1007,12 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora
// Intentional fall-though.
case k64:
if (r_src.IsFloat()) {
+      // Note: If the register is retrieved by the register allocator, it should never be a pair.
+      // But some functions in Mir2Lir assume 64-bit registers are 32-bit register pairs.
+ // TODO: Rework Mir2Lir::LoadArg() and Mir2Lir::LoadArgDirect().
+ if (r_src.IsPair()) {
+ r_src = As64BitFloatReg(r_src);
+ }
DCHECK(!r_src.IsPair());
store = LoadStoreUsingInsnWithOffsetImm8Shl2(kThumb2Vstrd, r_base, displacement, r_src);
} else {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 2bef7c53c5..bc4d00b6cd 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -248,13 +248,13 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampo
if (cu_->instruction_set == kMips) {
LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
} else {
- LoadValueDirectFixed(arg1, TargetReg(kArg1, kNotWide));
+ LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
}
} else {
if (cu_->instruction_set == kMips) {
LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
} else {
- LoadValueDirectWideFixed(arg1, TargetReg(kArg1, kWide));
+ LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
}
}
} else {
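
The gen_invoke.cc change above makes the non-MIPS path fp-aware: when the quick ABI passes floats in VFP registers, a float or double helper argument has to target kFArg1 instead of kArg1. The selection, reduced to a sketch with illustrative enumerators:

    // Pick the special target for the helper's second argument by its type.
    enum SpecialTargetSketch { kArg1Sketch, kFArg1Sketch };

    SpecialTargetSketch HelperArg1Target(bool arg_is_fp) {
      return arg_is_fp ? kFArg1Sketch : kArg1Sketch;
    }
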
@@ -365,6 +365,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
* ArgLocs is an array of location records describing the incoming arguments
* with one location record per word of argument.
*/
+// TODO: Support 64-bit argument registers.
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
/*
* Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 3e0844bec1..f4e6dfead2 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1191,13 +1191,17 @@ class Mir2Lir : public Backend {
*/
virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
if (wide_kind == kWide) {
- DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg7) || (kRet0 == reg));
+ DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
(kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
(kArg7 == kArg6 + 1), kargs_range_unexpected);
COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
(kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
- (kFArg7 == kFArg6 + 1), kfargs_range_unexpected);
+ (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
+ (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
+ (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
+ (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
+ kfargs_range_unexpected);
COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
return RegStorage::MakeRegPair(TargetReg(reg),
TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 6f2a647313..8f7bd3033a 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -425,6 +425,21 @@ static int kAllOpcodes[] = {
kMirOpSelect,
};
+static int kInvokeOpcodes[] = {
+ Instruction::INVOKE_VIRTUAL,
+ Instruction::INVOKE_SUPER,
+ Instruction::INVOKE_DIRECT,
+ Instruction::INVOKE_STATIC,
+ Instruction::INVOKE_INTERFACE,
+ Instruction::INVOKE_VIRTUAL_RANGE,
+ Instruction::INVOKE_SUPER_RANGE,
+ Instruction::INVOKE_DIRECT_RANGE,
+ Instruction::INVOKE_STATIC_RANGE,
+ Instruction::INVOKE_INTERFACE_RANGE,
+ Instruction::INVOKE_VIRTUAL_QUICK,
+ Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
+};
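
CanCompileMethod() below replaces a chain of equality comparisons with a linear std::find over this table; for a dozen opcodes that is plenty fast and keeps the invoke list in one place. The lookup pattern in isolation, with illustrative values:

    #include <algorithm>
    #include <iterator>

    static const int kOpcodeSet[] = {0x6e, 0x6f, 0x70, 0x71, 0x72};  // illustrative

    bool IsInvokeOpcode(int opcode) {
      return std::find(std::begin(kOpcodeSet), std::end(kOpcodeSet), opcode)
          != std::end(kOpcodeSet);
    }
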
+
// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
// recorded below.
static const int* kUnsupportedOpcodes[] = {
@@ -523,8 +538,8 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
// Check if we support the byte code.
- if (std::find(unsupport_list, unsupport_list + unsupport_list_size,
- opcode) != unsupport_list + unsupport_list_size) {
+ if (std::find(unsupport_list, unsupport_list + unsupport_list_size, opcode)
+ != unsupport_list + unsupport_list_size) {
if (!MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
VLOG(compiler) << "Unsupported dalvik byte code : "
<< mir->dalvikInsn.opcode;
@@ -535,11 +550,8 @@ bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_fil
return false;
}
// Check if it invokes a prototype that we cannot support.
- if (Instruction::INVOKE_VIRTUAL == opcode ||
- Instruction::INVOKE_SUPER == opcode ||
- Instruction::INVOKE_DIRECT == opcode ||
- Instruction::INVOKE_STATIC == opcode ||
- Instruction::INVOKE_INTERFACE == opcode) {
+ if (std::find(kInvokeOpcodes, kInvokeOpcodes + arraysize(kInvokeOpcodes), opcode)
+ != kInvokeOpcodes + arraysize(kInvokeOpcodes)) {
uint32_t invoke_method_idx = mir->dalvikInsn.vB;
const char* invoke_method_shorty = dex_file.GetMethodShorty(
dex_file.GetMethodId(invoke_method_idx));