| field | value | date |
|---|---|---|
| author | Vladimir Marko <vmarko@google.com> | 2014-05-07 17:31:25 +0000 |
| committer | Gerrit Code Review <noreply-gerritcodereview@google.com> | 2014-05-07 17:31:26 +0000 |
| commit | 410d87ff51e9432768924d2f294592818f93c244 (patch) | |
| tree | dc1de40eb82349e3b70cafe6c5f48021fed93ee9 /compiler | |
| parent | 052a647973b590c9d5007a2e16f313f4e32a70bd (diff) | |
| parent | 3bf7c60a86d49bf8c05c5d2ac5ca8e9f80bd9824 (diff) | |
| download | android_art-410d87ff51e9432768924d2f294592818f93c244.tar.gz, android_art-410d87ff51e9432768924d2f294592818f93c244.tar.bz2, android_art-410d87ff51e9432768924d2f294592818f93c244.zip | |
Merge "Cleanup ARM load/store wide and remove unused param s_reg."
Diffstat (limited to 'compiler')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/dex/quick/arm/codegen_arm.h | 18 |
| -rw-r--r-- | compiler/dex/quick/arm/int_arm.cc | 4 |
| -rw-r--r-- | compiler/dex/quick/arm/utility_arm.cc | 95 |
| -rw-r--r-- | compiler/dex/quick/arm64/codegen_arm64.h | 18 |
| -rw-r--r-- | compiler/dex/quick/arm64/utility_arm64.cc | 18 |
| -rw-r--r-- | compiler/dex/quick/gen_common.cc | 10 |
| -rw-r--r-- | compiler/dex/quick/gen_invoke.cc | 9 |
| -rw-r--r-- | compiler/dex/quick/gen_loadstore.cc | 2 |
| -rw-r--r-- | compiler/dex/quick/mips/codegen_mips.h | 18 |
| -rw-r--r-- | compiler/dex/quick/mips/int_mips.cc | 4 |
| -rw-r--r-- | compiler/dex/quick/mips/utility_mips.cc | 13 |
| -rw-r--r-- | compiler/dex/quick/mir_to_lir.cc | 6 |
| -rw-r--r-- | compiler/dex/quick/mir_to_lir.h | 16 |
| -rw-r--r-- | compiler/dex/quick/x86/codegen_x86.h | 15 |
| -rw-r--r-- | compiler/dex/quick/x86/fp_x86.cc | 2 |
| -rw-r--r-- | compiler/dex/quick/x86/int_x86.cc | 21 |
| -rw-r--r-- | compiler/dex/quick/x86/utility_x86.cc | 17 |
17 files changed, 134 insertions, 152 deletions
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h index 8b4576c56a..aab6b46d4c 100644 --- a/compiler/dex/quick/arm/codegen_arm.h +++ b/compiler/dex/quick/arm/codegen_arm.h @@ -32,19 +32,20 @@ class ArmMir2Lir FINAL : public Mir2Lir { bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE; LIR* CheckSuspendUsingLoad() OVERRIDE; RegStorage LoadHelper(ThreadOffset<4> offset); - LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg); + LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, + OpSize size) OVERRIDE; LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_dest, OpSize size, int s_reg); + RegStorage r_dest, OpSize size) OVERRIDE; LIR* LoadConstantNoClobber(RegStorage r_dest, int value); LIR* LoadConstantWide(RegStorage r_dest, int64_t value); - LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size); + LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, + OpSize size) OVERRIDE; LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_src, OpSize size, int s_reg); + RegStorage r_src, OpSize size) OVERRIDE; void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg); // Required for target - register utilities. @@ -171,8 +172,7 @@ class ArmMir2Lir FINAL : public Mir2Lir { void OpRegCopyWide(RegStorage dest, RegStorage src); void OpTlsCmp(ThreadOffset<4> offset, int val); - LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg); + LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size); LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size); LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2, int shift); diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index 8dd31d18ee..0948ce335b 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -692,7 +692,7 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) { } else { DCHECK(size == kSignedByte || size == kSignedHalf || size == k32); // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0. 
- LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG); + LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size); StoreValue(rl_dest, rl_result); } return true; @@ -1170,7 +1170,7 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, } FreeTemp(reg_len); } - LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, INVALID_SREG); + LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size); MarkPossibleNullPointerException(opt_flags); if (!constant_index) { FreeTemp(reg_ptr); diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index b7b9093b1d..1745c184e2 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -825,7 +825,7 @@ LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStor * performing null check, incoming MIR can be null. */ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, - OpSize size, int s_reg) { + OpSize size) { LIR* load = NULL; ArmOpcode opcode = kThumbBkpt; bool short_form = false; @@ -833,30 +833,32 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag bool all_low = r_dest.Is32Bit() && r_base.Low8() && r_dest.Low8(); int encoded_disp = displacement; bool already_generated = false; - bool null_pointer_safepoint = false; switch (size) { case kDouble: // Intentional fall-though. - case k64: + case k64: { + DCHECK_EQ(displacement & 3, 0); + encoded_disp = (displacement & 1020) >> 2; // Within range of kThumb2Vldrd/kThumb2LdrdI8. + RegStorage r_ptr = r_base; + if ((displacement & ~1020) != 0) { + // For core register load, use the r_dest.GetLow() for the temporary pointer. + r_ptr = r_dest.IsFloat() ? AllocTemp() : r_dest.GetLow(); + // Add displacement & ~1020 to base, it's a single instruction for up to +-256KiB. + OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~1020); + } if (r_dest.IsFloat()) { DCHECK(!r_dest.IsPair()); - opcode = kThumb2Vldrd; - if (displacement <= 1020) { - short_form = true; - encoded_disp >>= 2; - } + load = NewLIR3(kThumb2Vldrd, r_dest.GetReg(), r_ptr.GetReg(), encoded_disp); } else { - if (displacement <= 1020) { - load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(), - displacement >> 2); - } else { - load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), k32, s_reg); - null_pointer_safepoint = true; - LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), k32, INVALID_SREG); - } - already_generated = true; + load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(), + encoded_disp); + } + if ((displacement & ~1020) != 0 && !r_dest.IsFloat()) { + FreeTemp(r_ptr); } + already_generated = true; break; + } case kSingle: // Intentional fall-though. case k32: @@ -935,7 +937,7 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag if (r_dest.IsFloat()) { // No index ops - must use a long sequence. Turn the offset into a direct pointer. 
OpRegReg(kOpAdd, reg_offset, r_base); - load = LoadBaseDispBody(reg_offset, 0, r_dest, size, s_reg); + load = LoadBaseDispBody(reg_offset, 0, r_dest, size); } else { load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size); } @@ -946,22 +948,16 @@ LIR* ArmMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorag // TODO: in future may need to differentiate Dalvik accesses w/ spills if (r_base == rs_rARM_SP) { AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit()); - } else { - // We might need to generate a safepoint if we have two store instructions (wide or double). - if (!Runtime::Current()->ExplicitNullChecks() && null_pointer_safepoint) { - MarkSafepointPC(load); - } } return load; } -LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg) { +LIR* ArmMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size) { // TODO: base this on target. if (size == kWord) { size = k32; } - return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg); + return LoadBaseDispBody(r_base, displacement, r_dest, size); } @@ -974,29 +970,31 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora bool all_low = r_src.Is32Bit() && r_base.Low8() && r_src.Low8(); int encoded_disp = displacement; bool already_generated = false; - bool null_pointer_safepoint = false; switch (size) { - case k64: case kDouble: - if (!r_src.IsFloat()) { - if (displacement <= 1020) { - store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_base.GetReg(), - displacement >> 2); - } else { - store = StoreBaseDispBody(r_base, displacement, r_src.GetLow(), k32); - null_pointer_safepoint = true; - StoreBaseDispBody(r_base, displacement + 4, r_src.GetHigh(), k32); - } - already_generated = true; - } else { + // Intentional fall-though. + case k64: { + DCHECK_EQ(displacement & 3, 0); + encoded_disp = (displacement & 1020) >> 2; // Within range of kThumb2Vstrd/kThumb2StrdI8. + RegStorage r_ptr = r_base; + if ((displacement & ~1020) != 0) { + r_ptr = AllocTemp(); + // Add displacement & ~1020 to base, it's a single instruction for up to +-256KiB. + OpRegRegImm(kOpAdd, r_ptr, r_base, displacement & ~1020); + } + if (r_src.IsFloat()) { DCHECK(!r_src.IsPair()); - opcode = kThumb2Vstrd; - if (displacement <= 1020) { - short_form = true; - encoded_disp >>= 2; - } + store = NewLIR3(kThumb2Vstrd, r_src.GetReg(), r_ptr.GetReg(), encoded_disp); + } else { + store = NewLIR4(kThumb2StrdI8, r_src.GetLowReg(), r_src.GetHighReg(), r_ptr.GetReg(), + encoded_disp); + } + if ((displacement & ~1020) != 0) { + FreeTemp(r_ptr); } + already_generated = true; break; + } case kSingle: // Intentional fall-through. case k32: @@ -1070,11 +1068,6 @@ LIR* ArmMir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStora // TODO: In future, may need to differentiate Dalvik & spill accesses if (r_base == rs_rARM_SP) { AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit()); - } else { - // We might need to generate a safepoint if we have two store instructions (wide or double). 
- if (!Runtime::Current()->ExplicitNullChecks() && null_pointer_safepoint) { - MarkSafepointPC(store); - } } return store; } @@ -1119,7 +1112,7 @@ LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) { } LIR* ArmMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_src, OpSize size, int s_reg) { + int displacement, RegStorage r_src, OpSize size) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm"; return NULL; } @@ -1130,7 +1123,7 @@ LIR* ArmMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int o } LIR* ArmMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_dest, OpSize size, int s_reg) { + int displacement, RegStorage r_dest, OpSize size) { LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm"; return NULL; } diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h index 4e784c6b38..3d5e0543c8 100644 --- a/compiler/dex/quick/arm64/codegen_arm64.h +++ b/compiler/dex/quick/arm64/codegen_arm64.h @@ -32,19 +32,20 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE; LIR* CheckSuspendUsingLoad() OVERRIDE; RegStorage LoadHelper(ThreadOffset<4> offset); - LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg); + LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, + OpSize size) OVERRIDE; LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_dest, OpSize size, int s_reg); + RegStorage r_dest, OpSize size) OVERRIDE; LIR* LoadConstantNoClobber(RegStorage r_dest, int value); LIR* LoadConstantWide(RegStorage r_dest, int64_t value); - LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size); + LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, + OpSize size) OVERRIDE; LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_src, OpSize size, int s_reg); + RegStorage r_src, OpSize size) OVERRIDE; void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg); // Required for target - register utilities. @@ -171,8 +172,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { void OpRegCopyWide(RegStorage dest, RegStorage src); void OpTlsCmp(ThreadOffset<4> offset, int val); - LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg); + LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size); LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size); LIR* OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2, int shift); diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc index 8ff1830050..3782bc9ead 100644 --- a/compiler/dex/quick/arm64/utility_arm64.cc +++ b/compiler/dex/quick/arm64/utility_arm64.cc @@ -825,7 +825,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt * performing null check, incoming MIR can be null. 
*/ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, - OpSize size, int s_reg) { + OpSize size) { LIR* load = NULL; ArmOpcode opcode = kThumbBkpt; bool short_form = false; @@ -850,9 +850,9 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor load = NewLIR4(kThumb2LdrdI8, r_dest.GetLowReg(), r_dest.GetHighReg(), r_base.GetReg(), displacement >> 2); } else { - load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), k32, s_reg); + load = LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), k32); null_pointer_safepoint = true; - LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), k32, INVALID_SREG); + LoadBaseDispBody(r_base, displacement + 4, r_dest.GetHigh(), k32); } already_generated = true; } @@ -935,7 +935,7 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor if (r_dest.IsFloat()) { // No index ops - must use a long sequence. Turn the offset into a direct pointer. OpRegReg(kOpAdd, reg_offset, r_base); - load = LoadBaseDispBody(reg_offset, 0, r_dest, size, s_reg); + load = LoadBaseDispBody(reg_offset, 0, r_dest, size); } else { load = LoadBaseIndexed(r_base, reg_offset, r_dest, 0, size); } @@ -955,13 +955,13 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor return load; } -LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg) { +LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, + OpSize size) { // TODO: base this on target. if (size == kWord) { size = k32; } - return LoadBaseDispBody(r_base, displacement, r_dest, size, s_reg); + return LoadBaseDispBody(r_base, displacement, r_dest, size); } @@ -1119,7 +1119,7 @@ LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) { } LIR* Arm64Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_src, OpSize size, int s_reg) { + int displacement, RegStorage r_src, OpSize size) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm"; return NULL; } @@ -1130,7 +1130,7 @@ LIR* Arm64Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int } LIR* Arm64Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_dest, OpSize size, int s_reg) { + int displacement, RegStorage r_dest, OpSize size) { LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm"; return NULL; } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 395cff7d61..83d50452f1 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -642,7 +642,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, RegLocation rl_result = EvalLoc(rl_dest, result_reg_kind, true); OpSize size = LoadStoreOpSize(is_long_or_double, rl_result.ref); - LoadBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, size, INVALID_SREG); + LoadBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_result.reg, size); FreeTemp(r_base); if (field_info.IsVolatile()) { @@ -704,8 +704,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, result_reg_kind = kFPReg; } rl_result = EvalLoc(rl_dest, result_reg_kind, true); - LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, - size, rl_obj.s_reg_low); + LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, size); 
MarkPossibleNullPointerException(opt_flags); if (field_info.IsVolatile()) { // Without context sensitive analysis, we must issue the most conservative barriers. @@ -717,7 +716,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, RegStorage reg_ptr = AllocTemp(); OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, field_info.FieldOffset().Int32Value()); rl_result = EvalLoc(rl_dest, reg_class, true); - LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, INVALID_SREG); + LoadBaseDisp(reg_ptr, 0, rl_result.reg, size); MarkPossibleNullPointerException(opt_flags); if (field_info.IsVolatile()) { // Without context sensitive analysis, we must issue the most conservative barriers. @@ -731,8 +730,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, } else { rl_result = EvalLoc(rl_dest, reg_class, true); GenNullCheck(rl_obj.reg, opt_flags); - LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, k32, - rl_obj.s_reg_low); + LoadBaseDisp(rl_obj.reg, field_info.FieldOffset().Int32Value(), rl_result.reg, k32); MarkPossibleNullPointerException(opt_flags); if (field_info.IsVolatile()) { // Without context sensitive analysis, we must issue the most conservative barriers. diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 960ac10528..d51f2e0d32 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -1133,8 +1133,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) { LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf); } else { - LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf, - INVALID_SREG); + LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg, kUnsignedHalf); } FreeTemp(reg_off); FreeTemp(reg_ptr); @@ -1429,11 +1428,11 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info, RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (is_long) { if (cu_->instruction_set == kX86) { - LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64, INVALID_SREG); + LoadBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_result.reg, k64); } else { RegStorage rl_temp_offset = AllocTemp(); OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg); - LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, INVALID_SREG); + LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64); FreeTemp(rl_temp_offset); } } else { @@ -1476,7 +1475,7 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, if (is_long) { rl_value = LoadValueWide(rl_src_value, kCoreReg); if (cu_->instruction_set == kX86) { - StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg, k64, INVALID_SREG); + StoreBaseIndexedDisp(rl_object.reg, rl_offset.reg, 0, 0, rl_value.reg, k64); } else { RegStorage rl_temp_offset = AllocTemp(); OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg); diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index 6fe1e3169b..fc6af29119 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -123,7 +123,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest) { } else { DCHECK((rl_src.location == kLocDalvikFrame) || (rl_src.location == kLocCompilerTemp)); - LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64, INVALID_SREG); + LoadBaseDisp(TargetReg(kSp), SRegOffset(rl_src.s_reg_low), r_dest, k64); } } diff --git 
a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h index cdabf8ebc1..20fd4b1988 100644 --- a/compiler/dex/quick/mips/codegen_mips.h +++ b/compiler/dex/quick/mips/codegen_mips.h @@ -32,20 +32,20 @@ class MipsMir2Lir FINAL : public Mir2Lir { bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE; LIR* CheckSuspendUsingLoad() OVERRIDE; RegStorage LoadHelper(ThreadOffset<4> offset); - LIR* LoadBaseDisp(int r_base, int displacement, int r_dest, OpSize size, int s_reg); - LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg); + LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, + OpSize size) OVERRIDE; LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_dest, OpSize size, int s_reg); + RegStorage r_dest, OpSize size) OVERRIDE; LIR* LoadConstantNoClobber(RegStorage r_dest, int value); LIR* LoadConstantWide(RegStorage r_dest, int64_t value); - LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size); + LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, + OpSize size) OVERRIDE; LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_src, OpSize size, int s_reg); + RegStorage r_src, OpSize size) OVERRIDE; void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg); // Required for target - register utilities. @@ -171,7 +171,7 @@ class MipsMir2Lir FINAL : public Mir2Lir { // TODO: collapse r_dest. LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, - RegStorage r_dest_hi, OpSize size, int s_reg); + RegStorage r_dest_hi, OpSize size); // TODO: collapse r_src. LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, RegStorage r_src_hi, OpSize size); diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index fe2e495121..fdfe7fe25e 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -290,7 +290,7 @@ bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) { RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); DCHECK(size == kSignedByte); - LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG); + LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size); StoreValue(rl_dest, rl_result); return true; } @@ -511,7 +511,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, GenArrayBoundsCheck(rl_index.reg, reg_len); FreeTemp(reg_len); } - LoadBaseDisp(reg_ptr, 0, rl_result.reg, size, INVALID_SREG); + LoadBaseDisp(reg_ptr, 0, rl_result.reg, size); FreeTemp(reg_ptr); StoreValueWide(rl_dest, rl_result); diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 9aa929cbf3..83974119bc 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -448,7 +448,7 @@ LIR* MipsMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto // FIXME: don't split r_dest into 2 containers. 
LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, - RegStorage r_dest_hi, OpSize size, int s_reg) { + RegStorage r_dest_hi, OpSize size) { /* * Load value from base + displacement. Optionally perform null check * on base (which must have an associated s_reg and MIR). If not @@ -546,16 +546,15 @@ LIR* MipsMir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStora } LIR* MipsMir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, - OpSize size, int s_reg) { + OpSize size) { // TODO: base this on target. if (size == kWord) { size = k32; } if (size == k64 || size == kDouble) { - return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), k64, s_reg); + return LoadBaseDispBody(r_base, displacement, r_dest.GetLow(), r_dest.GetHigh(), size); } else { - return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size, - s_reg); + return LoadBaseDispBody(r_base, displacement, r_dest, RegStorage::InvalidReg(), size); } } @@ -665,7 +664,7 @@ LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) { } LIR* MipsMir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_src, OpSize size, int s_reg) { + int displacement, RegStorage r_src, OpSize size) { LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for MIPS"; return NULL; } @@ -676,7 +675,7 @@ LIR* MipsMir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int } LIR* MipsMir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_dest, OpSize size, int s_reg) { + int displacement, RegStorage r_dest, OpSize size) { LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for MIPS"; return NULL; } diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 9915ff6f3a..d10296f1c4 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -59,7 +59,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) { RegStorage new_regs = AllocTypedTempWide(false, kAnyReg); reg_arg_low = new_regs.GetLow(); reg_arg_high = new_regs.GetHigh(); - LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64, INVALID_SREG); + LoadBaseDisp(TargetReg(kSp), offset, new_regs, k64); } else { reg_arg_high = AllocTemp(); int offset_high = offset + sizeof(uint32_t); @@ -112,7 +112,7 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) { OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high); Load32Disp(TargetReg(kSp), offset, rl_dest.reg.GetLow()); } else { - LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64, INVALID_SREG); + LoadBaseDisp(TargetReg(kSp), offset, rl_dest.reg, k64); } } } @@ -137,7 +137,7 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) { LockArg(data.object_arg); RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float); RegStorage reg_obj = LoadArg(data.object_arg); - LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, size, INVALID_SREG); + LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg, size); if (data.is_volatile) { // Without context sensitive analysis, we must issue the most conservative barriers. // In this case, either a load or store may follow so we issue both barriers. 
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index cc6532c76c..4891d8c830 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -843,15 +843,15 @@ class Mir2Lir : public Backend { LIR* LoadConstant(RegStorage r_dest, int value); // Natural word size. LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) { - return LoadBaseDisp(r_base, displacement, r_dest, kWord, INVALID_SREG); + return LoadBaseDisp(r_base, displacement, r_dest, kWord); } // Load 32 bits, regardless of target. LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) { - return LoadBaseDisp(r_base, displacement, r_dest, k32, INVALID_SREG); + return LoadBaseDisp(r_base, displacement, r_dest, k32); } // Load a reference at base + displacement and decompress into register. LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest) { - return LoadBaseDisp(r_base, displacement, r_dest, kReference, INVALID_SREG); + return LoadBaseDisp(r_base, displacement, r_dest, kReference); } // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress. RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind); @@ -975,13 +975,12 @@ class Mir2Lir : public Backend { virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0; virtual LIR* CheckSuspendUsingLoad() = 0; virtual RegStorage LoadHelper(ThreadOffset<4> offset) = 0; - virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg) = 0; + virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, + OpSize size) = 0; virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale, OpSize size) = 0; virtual LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_dest, OpSize size, - int s_reg) = 0; + int displacement, RegStorage r_dest, OpSize size) = 0; virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0; virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0; virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, @@ -989,8 +988,7 @@ class Mir2Lir : public Backend { virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale, OpSize size) = 0; virtual LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_src, OpSize size, - int s_reg) = 0; + int displacement, RegStorage r_src, OpSize size) = 0; virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0; // Required for target - register utilities. 
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index 1898738930..a03e5f2e04 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -32,19 +32,20 @@ class X86Mir2Lir FINAL : public Mir2Lir { bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE; LIR* CheckSuspendUsingLoad() OVERRIDE; RegStorage LoadHelper(ThreadOffset<4> offset); - LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size, - int s_reg); + LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, + OpSize size) OVERRIDE; LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_dest, OpSize size, int s_reg); + RegStorage r_dest, OpSize size) OVERRIDE; LIR* LoadConstantNoClobber(RegStorage r_dest, int value); LIR* LoadConstantWide(RegStorage r_dest, int64_t value); - LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size); + LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, + OpSize size) OVERRIDE; LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale, - OpSize size); + OpSize size) OVERRIDE; LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement, - RegStorage r_src, OpSize size, int s_reg); + RegStorage r_src, OpSize size) OVERRIDE; void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg); // Required for target - register utilities. diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc index 74828c7ad9..d1c2e70c2c 100644 --- a/compiler/dex/quick/x86/fp_x86.cc +++ b/compiler/dex/quick/x86/fp_x86.cc @@ -183,7 +183,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do if (is_double) { rl_result = EvalLocWide(rl_dest, kFPReg, true); - LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, INVALID_SREG); + LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64); StoreFinalValueWide(rl_dest, rl_result); } else { diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 315d5804ff..ce5766f78f 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -691,7 +691,7 @@ bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) { RegLocation rl_address = LoadValue(rl_src_address, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); // Unaligned access is allowed on x86. - LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, INVALID_SREG); + LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size); if (size == k64) { StoreValueWide(rl_dest, rl_result); } else { @@ -1007,7 +1007,7 @@ void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int NewLIR2(kX86Xor32RR, dest.GetReg(), dest.GetReg()); break; case 1: - LoadBaseDisp(rs_rX86_SP, displacement, dest, k32, sreg); + LoadBaseDisp(rs_rX86_SP, displacement, dest, k32); break; default: m = NewLIR4(IS_SIMM8(val) ? 
kX86Imul32RMI8 : kX86Imul32RMI, dest.GetReg(), @@ -1111,8 +1111,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation if (src1_in_reg) { NewLIR2(kX86Mov32RR, rs_r1.GetReg(), rl_src1.reg.GetHighReg()); } else { - LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, - k32, GetSRegHi(rl_src1.s_reg_low)); + LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, rs_r1, k32); } if (is_square) { @@ -1135,8 +1134,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation if (src2_in_reg) { NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetHighReg()); } else { - LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, - k32, GetSRegHi(rl_src2.s_reg_low)); + LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, rs_r0, k32); } // EAX <- EAX * 1L (2H * 1L) @@ -1169,8 +1167,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation if (src2_in_reg) { NewLIR2(kX86Mov32RR, rs_r0.GetReg(), rl_src2.reg.GetLowReg()); } else { - LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, - k32, rl_src2.s_reg_low); + LoadBaseDisp(rs_rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, rs_r0, k32); } // EDX:EAX <- 2L * 1L (double precision) @@ -1419,8 +1416,7 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, } } rl_result = EvalLoc(rl_dest, reg_class, true); - LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg, size, - INVALID_SREG); + LoadBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_result.reg, size); if ((size == k64) || (size == kDouble)) { StoreValueWide(rl_dest, rl_result); } else { @@ -1477,10 +1473,9 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, rl_src.reg.GetRegNum() >= rs_rX86_SP.GetRegNum()) { RegStorage temp = AllocTemp(); OpRegCopy(temp, rl_src.reg); - StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp, size, INVALID_SREG); + StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp, size); } else { - StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg, size, - INVALID_SREG); + StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg, size); } if (card_mark) { // Free rl_index if its a temp. Ensures there are 2 free regs for card mark. diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index 7fe0d1f4d6..8423ec4a50 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -520,7 +520,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) { // 4 byte offset. We will fix this up in the assembler later to have the right // value. 
res = LoadBaseDisp(rl_method.reg, 256 /* bogus */, RegStorage::Solo64(low_reg_val), - kDouble, INVALID_SREG); + kDouble); res->target = data_target; res->flags.fixup = kFixupLoad; SetMemRefType(res, true, kLiteral); @@ -546,7 +546,7 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) { } LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_dest, OpSize size, int s_reg) { + int displacement, RegStorage r_dest, OpSize size) { LIR *load = NULL; LIR *load2 = NULL; bool is_array = r_index.Valid(); @@ -663,21 +663,21 @@ LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int /* Load value from base + scaled index. */ LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale, OpSize size) { - return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size, INVALID_SREG); + return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size); } LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, - OpSize size, int s_reg) { + OpSize size) { // TODO: base this on target. if (size == kWord) { size = k32; } return LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest, - size, s_reg); + size); } LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, - int displacement, RegStorage r_src, OpSize size, int s_reg) { + int displacement, RegStorage r_src, OpSize size) { LIR *store = NULL; LIR *store2 = NULL; bool is_array = r_index.Valid(); @@ -752,7 +752,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int /* store value base base + scaled index. */ LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale, OpSize size) { - return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size, INVALID_SREG); + return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size); } LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, @@ -761,8 +761,7 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, if (size == kWord) { size = k32; } - return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size, - INVALID_SREG); + return StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size); } LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg, |
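Beyond the parameter removal, the substantive ARM change in compiler/dex/quick/arm/utility_arm.cc above rewrites the wide (k64/kDouble) load and store paths: rather than splitting an out-of-range access into two 32-bit operations (and marking an extra safepoint for implicit null checks), the new code always emits one LDRD/STRD or VLDRD/VSTRD, first adding `displacement & ~1020` to a temporary base register when the displacement exceeds the instruction's 0..1020 immediate range; per the hunk's comment, that single ADD covers adjustments up to roughly +/-256 KiB. A minimal standalone sketch of that displacement arithmetic follows; it is illustrative only and not ART code.

```cpp
#include <cassert>
#include <cstdio>

// Hypothetical helper mirroring the arithmetic in the new ARM wide load/store path.
// Thumb2 LDRD/STRD/VLDRD/VSTRD encode an offset of 0..1020 in multiples of 4, so a
// larger displacement is split into a base adjustment applied with a single ADD and
// an encoded immediate carried by the memory instruction itself.
struct SplitDisp {
  int base_adjust;   // added to the base register first (0 when the offset already fits)
  int encoded_disp;  // immediate field of the LDRD/STRD/VLDRD/VSTRD (offset / 4)
};

SplitDisp SplitWideDisplacement(int displacement) {
  assert((displacement & 3) == 0);  // wide accesses are 4-byte aligned
  SplitDisp s;
  s.base_adjust = displacement & ~1020;
  s.encoded_disp = (displacement & 1020) >> 2;
  return s;
}

int main() {
  SplitDisp a = SplitWideDisplacement(1016);  // fits: no base adjustment needed
  SplitDisp b = SplitWideDisplacement(4104);  // needs ADD r_ptr, r_base, #4096 first
  std::printf("1016 -> adjust %d, imm %d\n", a.base_adjust, a.encoded_disp);
  std::printf("4104 -> adjust %d, imm %d\n", b.base_adjust, b.encoded_disp);
  return 0;
}
```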