| author | Andreas Gampe <agampe@google.com> | 2014-07-04 18:02:38 -0700 |
|---|---|---|
| committer | Andreas Gampe <agampe@google.com> | 2014-07-12 13:33:12 -0700 |
| commit | ccc60264229ac96d798528d2cb7dbbdd0deca993 (patch) | |
| tree | 998378a38ca4d510090c7b4e7832379989354680 /compiler/dex/quick/gen_invoke.cc | |
| parent | bc9127a5d451058aede5562e2b015caec618d008 (diff) | |
| download | android_art-ccc60264229ac96d798528d2cb7dbbdd0deca993.tar.gz android_art-ccc60264229ac96d798528d2cb7dbbdd0deca993.tar.bz2 android_art-ccc60264229ac96d798528d2cb7dbbdd0deca993.zip | |
ART: Rework TargetReg(symbolic_reg, wide)
Make the standard implementation in Mir2Lir and the specialized one
in the x86 backend return a register pair when wide = true. Introduce
a WideKind enumeration to improve code readability, and simplify the
generic code based on this implementation.
Change-Id: I670d45aa2572eedfdc77ac763e6486c83f8e26b4
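The commit message is easiest to follow with the two new API pieces in front of you. The sketch below is illustrative only: the enumerator names (kNotWide, kWide, kRef) and the GetWideKind() accessor appear in the diff itself, but the surrounding declarations are stand-ins for what actually lives in the ART compiler headers (reg_storage.h, mir_to_lir.h) and are not copied from them.

```cpp
#include <cstdint>

// Sketch of the WideKind enumeration introduced by this change. The real
// declaration lives in the ART compiler headers and may order or extend
// these values differently.
enum WideKind {
  kNotWide,  // narrow 32-bit view; replaces TargetReg(reg, false)
  kWide,     // wide 64-bit / register-pair view; replaces TargetReg(reg, true)
  kRef       // reference (pointer-sized) view; replaces TargetRefReg(reg)
};

// RegStorage::GetWideKind() lets call sites write
//   OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
// instead of the old boolean form using arg0.Is64Bit(). The members below
// are invented for the sketch; only the method names match the diff.
struct RegStorage {
  uint16_t reg_;
  bool is_64bit_;

  bool Is64Bit() const { return is_64bit_; }
  WideKind GetWideKind() const { return Is64Bit() ? kWide : kNotWide; }
};
```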
Diffstat (limited to 'compiler/dex/quick/gen_invoke.cc')
| -rwxr-xr-x | compiler/dex/quick/gen_invoke.cc | 339 |
1 file changed, 159 insertions, 180 deletions
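Most of the 180 deleted lines collapse one recurring pattern, which the hunks below repeat across many CallRuntimeHelper* overloads. Here is a self-contained before/after sketch of that pattern; every type and helper in it is a stub with an invented body, so treat it as a schematic rather than code from the tree.

```cpp
// Schematic before/after of the dominant pattern in this patch. Only the
// shape of the two LoadWideArg functions mirrors the hunks below.
enum SpecialTargetRegister { kArg0, kArg1, kArg2, kArg3 };
enum WideKind { kNotWide, kWide, kRef };
struct RegStorage { int low; int high; };
struct RegLocation { bool wide; };

static bool Is64BitTarget() { return false; }  // stands in for cu_->target64
static RegStorage TargetReg(SpecialTargetRegister r, WideKind) {
  return RegStorage{static_cast<int>(r), -1};
}
static RegStorage TargetReg(SpecialTargetRegister lo, SpecialTargetRegister hi) {
  // Old two-register form used to build wide pairs by hand.
  return RegStorage{static_cast<int>(lo), static_cast<int>(hi)};
}
static void LoadValueDirectWideFixed(RegLocation, RegStorage) {}

// Before: each call site chose between a solo 64-bit register and an
// explicit pair, sometimes with per-ISA special cases on top.
static void LoadWideArg2Old(RegLocation arg2) {
  RegStorage r_tmp;
  if (Is64BitTarget()) {
    r_tmp = TargetReg(kArg2, kWide);   // was TargetReg(kArg2, true)
  } else {
    r_tmp = TargetReg(kArg2, kArg3);   // hand-built register pair
  }
  LoadValueDirectWideFixed(arg2, r_tmp);
}

// After: one line; TargetReg(reg, kWide) hides the pairing decision in the
// backend, so the generic code no longer branches on the target.
static void LoadWideArg2New(RegLocation arg2) {
  LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
}
```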
```diff
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 3a304304e9..367e07bb81 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -131,9 +131,10 @@ void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool s
 INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc)
 
 template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0,
+                                   bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  LoadConstant(TargetReg(kArg0, false), arg0);
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -143,7 +144,7 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                    bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
+  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -156,13 +157,7 @@ void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_off
   if (arg0.wide == 0) {
     LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
   } else {
-    RegStorage r_tmp;
-    if (cu_->target64) {
-      r_tmp = TargetReg(kArg0, true);
-    } else {
-      r_tmp = TargetReg(arg0.fp ? kFArg0 : kArg0, arg0.fp ? kFArg1 : kArg1);
-    }
-    LoadValueDirectWideFixed(arg0, r_tmp);
+    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
   }
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
@@ -173,8 +168,8 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1,
                                       bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  LoadConstant(TargetReg(kArg0, false), arg0);
-  LoadConstant(TargetReg(kArg1, false), arg1);
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
+  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -184,23 +179,14 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0,
                                               RegLocation arg1, bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
+  DCHECK(!arg1.fp);
   if (arg1.wide == 0) {
     LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
   } else {
-    RegStorage r_tmp;
-    if (cu_->target64) {
-      r_tmp = TargetReg(kArg1, true);
-    } else {
-      if (cu_->instruction_set == kMips) {
-        // skip kArg1 for stack alignment.
-        r_tmp = TargetReg(kArg2, kArg3);
-      } else {
-        r_tmp = TargetReg(kArg1, kArg2);
-      }
-    }
+    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
     LoadValueDirectWideFixed(arg1, r_tmp);
   }
-  LoadConstant(TargetReg(kArg0, false), arg0);
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -213,7 +199,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_
   RegStorage r_tgt = CallHelperSetup(helper_offset);
   DCHECK(!arg0.wide);
   LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
-  LoadConstant(TargetReg(kArg1, false), arg1);
+  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -224,8 +210,8 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0,
                                       RegStorage arg1, bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
-  LoadConstant(TargetReg(kArg0, false), arg0);
+  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -235,8 +221,8 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                       int arg1, bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
-  LoadConstant(TargetReg(kArg1, false), arg1);
+  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
+  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -246,8 +232,8 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0,
                                          bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  LoadCurrMethodDirect(TargetRefReg(kArg1));
-  LoadConstant(TargetReg(kArg0, false), arg0);
+  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -257,11 +243,12 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0,
                                          bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0));
-  if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) {
-    OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
+  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
+  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
+  if (r_tmp.NotExactlyEquals(arg0)) {
+    OpRegCopy(r_tmp, arg0);
   }
-  LoadCurrMethodDirect(TargetRefReg(kArg1));
+  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -272,11 +259,12 @@ void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> h
                                                     RegStorage arg0, RegLocation arg2,
                                                     bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0));
-  if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) {
-    OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
+  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
+  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
+  if (r_tmp.NotExactlyEquals(arg0)) {
+    OpRegCopy(r_tmp, arg0);
   }
-  LoadCurrMethodDirect(TargetRefReg(kArg1));
+  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
   LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
@@ -313,47 +301,26 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size>
   } else {
     DCHECK(!cu_->target64);
     if (arg0.wide == 0) {
-      LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0, false) : TargetReg(kArg0, false));
+      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
       if (arg1.wide == 0) {
         if (cu_->instruction_set == kMips) {
-          LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2, false) : TargetReg(kArg1, false));
+          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide));
        } else {
-          LoadValueDirectFixed(arg1, TargetReg(kArg1, false));
+          LoadValueDirectFixed(arg1, TargetReg(kArg1, kNotWide));
        }
      } else {
        if (cu_->instruction_set == kMips) {
-          RegStorage r_tmp;
-          if (arg1.fp) {
-            r_tmp = TargetReg(kFArg2, kFArg3);
-          } else {
-            // skip kArg1 for stack alignment.
-            r_tmp = TargetReg(kArg2, kArg3);
-          }
-          LoadValueDirectWideFixed(arg1, r_tmp);
+          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
        } else {
-          RegStorage r_tmp;
-          r_tmp = TargetReg(kArg1, kArg2);
-          LoadValueDirectWideFixed(arg1, r_tmp);
+          LoadValueDirectWideFixed(arg1, TargetReg(kArg1, kWide));
        }
      }
    } else {
-      RegStorage r_tmp;
-      if (arg0.fp) {
-        r_tmp = TargetReg(kFArg0, kFArg1);
-      } else {
-        r_tmp = TargetReg(kArg0, kArg1);
-      }
-      LoadValueDirectWideFixed(arg0, r_tmp);
+      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
      if (arg1.wide == 0) {
-        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2, false) : TargetReg(kArg2, false));
+        LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
      } else {
-        RegStorage r_tmp;
-        if (arg1.fp) {
-          r_tmp = TargetReg(kFArg2, kFArg3);
-        } else {
-          r_tmp = TargetReg(kArg2, kArg3);
-        }
-        LoadValueDirectWideFixed(arg1, r_tmp);
+        LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
      }
    }
  }
@@ -364,19 +331,21 @@ INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation a
             RegLocation arg1, bool safepoint_pc)
 
 void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
-  if (IsSameReg(arg1, TargetReg(kArg0, arg1.Is64Bit()))) {
-    if (IsSameReg(arg0, TargetReg(kArg1, arg0.Is64Bit()))) {
+  WideKind arg0_kind = arg0.GetWideKind();
+  WideKind arg1_kind = arg1.GetWideKind();
+  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
+    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
       // Swap kArg0 and kArg1 with kArg2 as temp.
-      OpRegCopy(TargetReg(kArg2, arg1.Is64Bit()), arg1);
-      OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
-      OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2, arg1.Is64Bit()));
+      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
+      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
+      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
     } else {
-      OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
-      OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
+      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
+      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
     }
   } else {
-    OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0);
-    OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1);
+    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
+    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
   }
 }
 
@@ -396,7 +365,7 @@ void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offse
                                          RegStorage arg1, int arg2, bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
   CopyToArgumentRegs(arg0, arg1);
-  LoadConstant(TargetReg(kArg2, false), arg2);
+  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -408,8 +377,8 @@ void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> h
                                                     int arg0, RegLocation arg2, bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
   LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
-  LoadCurrMethodDirect(TargetRefReg(kArg1));
-  LoadConstant(TargetReg(kArg0, false), arg0);
+  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -420,9 +389,9 @@ template <size_t pointer_size>
 void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0,
                                             int arg2, bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
-  LoadCurrMethodDirect(TargetRefReg(kArg1));
-  LoadConstant(TargetReg(kArg2, false), arg2);
-  LoadConstant(TargetReg(kArg0, false), arg0);
+  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
+  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -439,15 +408,9 @@ void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_si
   if (arg2.wide == 0) {
     LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
   } else {
-    RegStorage r_tmp;
-    if (cu_->target64) {
-      r_tmp = TargetReg(kArg2, true);
-    } else {
-      r_tmp = TargetReg(kArg2, kArg3);
-    }
-    LoadValueDirectWideFixed(arg2, r_tmp);
+    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
   }
-  LoadConstant(TargetReg(kArg0, false), arg0);
+  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
   ClobberCallerSave();
   CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc);
 }
@@ -455,10 +418,12 @@ INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0,
             RegLocation arg2, bool safepoint_pc)
 
 template <size_t pointer_size>
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset,
-                                                                 RegLocation arg0, RegLocation arg1,
-                                                                 RegLocation arg2,
-                                                                 bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
+    ThreadOffset<pointer_size> helper_offset,
+    RegLocation arg0,
+    RegLocation arg1,
+    RegLocation arg2,
+    bool safepoint_pc) {
   RegStorage r_tgt = CallHelperSetup(helper_offset);
   LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
   LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
@@ -485,7 +450,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
    */
   RegLocation rl_src = rl_method;
   rl_src.location = kLocPhysReg;
-  rl_src.reg = TargetRefReg(kArg0);
+  rl_src.reg = TargetReg(kArg0, kRef);
   rl_src.home = false;
   MarkLive(rl_src);
   StoreValue(rl_method, rl_src);
@@ -559,15 +524,44 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
     } else {
       // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
-        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg));
+        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
+                   RegStorage::Solo32(v_map->core_reg));
      }
      if (v_map->fp_location == kLocPhysReg) {
-        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg));
+        Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i),
+                   RegStorage::Solo32(v_map->fp_reg));
      }
    }
  }
 }
 
+static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
+  RegLocation rl_arg = info->args[0];
+  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
+}
+
+static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
+  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
+  // get this->klass_ [use kArg1, set kArg0]
+  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
+                  cg->TargetReg(kArg0, kRef),
+                  kNotVolatile);
+  cg->MarkPossibleNullPointerException(info->opt_flags);
+}
+
+static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
+                                                       const RegStorage* alt_from,
+                                                       const CompilationUnit* cu, Mir2Lir* cg) {
+  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
+    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
+    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
+                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
+                     cg->TargetPtrReg(kInvokeTgt));
+    return true;
+  }
+  return false;
+}
+
 /*
  * Bit of a hack here - in the absence of a real scheduling pass,
  * emit the next instruction in static & direct invoke sequences.
@@ -589,7 +583,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
         cg->LoadCodeAddress(target_method, type, kInvokeTgt);
       }
       if (direct_method != static_cast<uintptr_t>(-1)) {
-        cg->LoadConstant(cg->TargetRefReg(kArg0), direct_method);
+        cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
       } else {
         cg->LoadMethodAddress(target_method, type, kArg0);
       }
@@ -598,7 +592,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
       return -1;
     }
   } else {
-    RegStorage arg0_ref = cg->TargetRefReg(kArg0);
+    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
     switch (state) {
     case 0:  // Get the current Method* [sets kArg0]
       // TUNING: we can save a reg copy if Method* has been promoted.
@@ -627,12 +621,11 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                       kNotVolatile);
       break;
     case 3:  // Grab the code from the method*
-      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
-        if (direct_code == 0) {
-          cg->LoadWordDisp(arg0_ref,
-                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
-                           cg->TargetPtrReg(kInvokeTgt));
+      if (direct_code == 0) {
+        if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
+          break;                                    // kInvokeTgt := arg0_ref->entrypoint
        }
+      } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
        break;
      }
      // Intentional fallthrough for x86
@@ -660,34 +653,24 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
    * fully resolved at compile time.
    */
   switch (state) {
-    case 0: {  // Get "this" [set kArg1]
-      RegLocation rl_arg = info->args[0];
-      cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1));
+    case 0:
+      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
       break;
-    }
-    case 1:  // Is "this" null? [use kArg1]
-      cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags);
-      // get this->klass_ [use kArg1, set kArg0]
-      cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
-                      cg->TargetRefReg(kArg0),
-                      kNotVolatile);
-      cg->MarkPossibleNullPointerException(info->opt_flags);
+    case 1:
+      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
+                                                  // Includes a null-check.
       break;
     case 2: {  // Get this->klass_.embedded_vtable[method_idx] [usr kArg0, set kArg0]
       int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
           method_idx * sizeof(mirror::Class::VTableEntry);
       // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
-      cg->LoadRefDisp(cg->TargetRefReg(kArg0), offset, cg->TargetRefReg(kArg0), kNotVolatile);
+      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
       break;
     }
     case 3:
-      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
-        // Get the compiled code address [use kArg0, set kInvokeTgt]
-        cg->LoadWordDisp(cg->TargetRefReg(kArg0),
-                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
-                         cg->TargetPtrReg(kInvokeTgt));
-        break;
+      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+        break;                                    // kInvokeTgt := kArg0->entrypoint
      }
      // Intentional fallthrough for X86
    default:
@@ -711,40 +694,28 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
   switch (state) {
     case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
       CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
-      cg->LoadConstant(cg->TargetReg(kHiddenArg, false), target_method.dex_method_index);
+      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
      if (cu->instruction_set == kX86) {
-        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, false), cg->TargetReg(kHiddenArg, false));
+        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
      }
      break;
-    case 1: {  // Get "this" [set kArg1]
-      RegLocation rl_arg = info->args[0];
-      cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1));
+    case 1:
+      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
      break;
-    }
-    case 2:  // Is "this" null? [use kArg1]
-      cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags);
-      // Get this->klass_ [use kArg1, set kArg0]
-      cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
-                      cg->TargetRefReg(kArg0),
-                      kNotVolatile);
-      cg->MarkPossibleNullPointerException(info->opt_flags);
+    case 2:
+      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
+                                                  // Includes a null-check.
      break;
    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
      int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() +
          (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
-      cg->LoadRefDisp(cg->TargetRefReg(kArg0), offset,
-                      cg->TargetRefReg(kArg0),
-                      kNotVolatile);
+      cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile);
      break;
    }
    case 4:
-      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
-        // Get the compiled code address [use kArg0, set kInvokeTgt]
-        cg->LoadWordDisp(cg->TargetRefReg(kArg0),
-                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
-                         cg->TargetPtrReg(kInvokeTgt));
-        break;
+      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+        break;                                    // kInvokeTgt := kArg0->entrypoint
      }
      // Intentional fallthrough for X86
    default:
@@ -754,9 +725,9 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
 }
 
 template <size_t pointer_size>
-static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<pointer_size> trampoline,
-                            int state, const MethodReference& target_method,
-                            uint32_t method_idx) {
+static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
+                            ThreadOffset<pointer_size> trampoline, int state,
+                            const MethodReference& target_method, uint32_t method_idx) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
   /*
    * This handles the case in which the base method is not fully
@@ -765,11 +736,12 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<po
   if (state == 0) {
     if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
       // Load trampoline target
-      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(), cg->TargetPtrReg(kInvokeTgt));
+      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(),
+                       cg->TargetPtrReg(kInvokeTgt));
     }
     // Load kArg0 with method index
     CHECK_EQ(cu->dex_file, target_method.dex_file);
-    cg->LoadConstant(cg->TargetReg(kArg0, false), target_method.dex_method_index);
+    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
     return 1;
   }
   return -1;
@@ -820,10 +792,12 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                            uint32_t unused, uintptr_t unused2, uintptr_t unused3,
                            InvokeType unused4) {
   if (cu->target64) {
-    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeVirtualTrampolineWithAccessCheck);
+    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
+        pInvokeVirtualTrampolineWithAccessCheck);
     return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
   } else {
-    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeVirtualTrampolineWithAccessCheck);
+    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
+        pInvokeVirtualTrampolineWithAccessCheck);
     return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
   }
 }
@@ -834,10 +808,12 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                 uint32_t unused, uintptr_t unused2,
                                                 uintptr_t unused3, InvokeType unused4) {
   if (cu->target64) {
-    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeInterfaceTrampolineWithAccessCheck);
+    ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8,
+        pInvokeInterfaceTrampolineWithAccessCheck);
     return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0);
   } else {
-    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeInterfaceTrampolineWithAccessCheck);
+    ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4,
+        pInvokeInterfaceTrampolineWithAccessCheck);
     return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0);
   }
 }
@@ -848,7 +824,8 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                          uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type, bool skip_this) {
   int last_arg_reg = 3 - 1;
-  int arg_regs[3] = {TargetReg(kArg1, false).GetReg(), TargetReg(kArg2, false).GetReg(), TargetReg(kArg3, false).GetReg()};
+  int arg_regs[3] = {TargetReg(kArg1, kNotWide).GetReg(), TargetReg(kArg2, kNotWide).GetReg(),
+                     TargetReg(kArg3, kNotWide).GetReg()};
 
   int next_reg = 0;
   int next_arg = 0;
@@ -923,7 +900,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
          }
        } else {
          // kArg2 & rArg3 can safely be used here
-          reg = TargetReg(kArg3, false);
+          reg = TargetReg(kArg3, kNotWide);
          {
            ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
            Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
@@ -947,7 +924,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
      if (rl_arg.location == kLocPhysReg) {
        arg_reg = rl_arg.reg;
      } else {
-        arg_reg = rl_arg.wide ? TargetReg(kArg2, kArg3) : TargetReg(kArg2, false);
+        arg_reg = TargetReg(kArg2, rl_arg.wide ? kWide : kNotWide);
        if (rl_arg.wide) {
          LoadValueDirectWideFixed(rl_arg, arg_reg);
        } else {
@@ -978,13 +955,13 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
 
   if (pcrLabel) {
     if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
-      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
+      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
     } else {
       *pcrLabel = nullptr;
       // In lieu of generating a check for kArg1 being null, we need to
       // perform a load when doing implicit checks.
       RegStorage tmp = AllocTemp();
-      Load32Disp(TargetRefReg(kArg1), 0, tmp);
+      Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
       MarkPossibleNullPointerException(info->opt_flags);
       FreeTemp(tmp);
     }
@@ -1058,23 +1035,23 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
       // Use vldm/vstm pair using kArg3 as a temp
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                   direct_code, direct_method, type);
-      OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), start_offset);
+      OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), start_offset);
       LIR* ld = nullptr;
       {
         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-        ld = OpVldm(TargetRefReg(kArg3), regs_left_to_pass_via_stack);
+        ld = OpVldm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
       }
       // TUNING: loosen barrier
       ld->u.m.def_mask = &kEncodeAll;
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                   direct_code, direct_method, type);
-      OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4));
+      OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4));
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                   direct_code, direct_method, type);
       LIR* st = nullptr;
       {
         ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-        st = OpVstm(TargetRefReg(kArg3), regs_left_to_pass_via_stack);
+        st = OpVstm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack);
       }
       st->u.m.def_mask = &kEncodeAll;
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
@@ -1148,7 +1125,8 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
         if (ld2 != nullptr) {
           // For 64-bit load we can actually set up the aliasing information.
           AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
-          AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
+          AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true,
+                                  true);
         } else {
           // Set barrier for 128-bit load.
           ld1->u.m.def_mask = &kEncodeAll;
@@ -1158,7 +1136,8 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
         if (st2 != nullptr) {
           // For 64-bit store we can actually set up the aliasing information.
           AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
-          AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
+          AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false,
+                                  true);
         } else {
           // Set barrier for 128-bit store.
           st1->u.m.def_mask = &kEncodeAll;
@@ -1173,7 +1152,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
 
         // Instead of allocating a new temp, simply reuse one of the registers being used
         // for argument passing.
-        RegStorage temp = TargetReg(kArg3, false);
+        RegStorage temp = TargetReg(kArg3, kNotWide);
 
         // Now load the argument VR and store to the outs.
         Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
@@ -1186,14 +1165,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
     }
   } else {
     // Generate memcpy
-    OpRegRegImm(kOpAdd, TargetRefReg(kArg0), TargetPtrReg(kSp), outs_offset);
-    OpRegRegImm(kOpAdd, TargetRefReg(kArg1), TargetPtrReg(kSp), start_offset);
+    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
+    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
     if (cu_->target64) {
-      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetRefReg(kArg0),
-                                 TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false);
+      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0, kRef),
+                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
     } else {
-      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetRefReg(kArg0),
-                                 TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false);
+      CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0, kRef),
+                                 TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false);
     }
   }
 
@@ -1205,13 +1184,13 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                            direct_code, direct_method, type);
   if (pcrLabel) {
     if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
-      *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
+      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
     } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
-      Load32Disp(TargetRefReg(kArg1), 0, tmp);
+      Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
@@ -1550,9 +1529,9 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
 
   ClobberCallerSave();
   LockCallTemps();  // Using fixed registers
-  RegStorage reg_ptr = TargetRefReg(kArg0);
-  RegStorage reg_char = TargetReg(kArg1, false);
-  RegStorage reg_start = TargetReg(kArg2, false);
+  RegStorage reg_ptr = TargetReg(kArg0, kRef);
+  RegStorage reg_char = TargetReg(kArg1, kNotWide);
+  RegStorage reg_start = TargetReg(kArg2, kNotWide);
 
   LoadValueDirectFixed(rl_obj, reg_ptr);
   LoadValueDirectFixed(rl_char, reg_char);
@@ -1594,8 +1573,8 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
   }
   ClobberCallerSave();
   LockCallTemps();  // Using fixed registers
-  RegStorage reg_this = TargetRefReg(kArg0);
-  RegStorage reg_cmp = TargetRefReg(kArg1);
+  RegStorage reg_this = TargetReg(kArg0, kRef);
+  RegStorage reg_cmp = TargetReg(kArg1, kRef);
 
   RegLocation rl_this = info->args[0];
   RegLocation rl_cmp = info->args[1];
@@ -1877,7 +1856,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
         call_inst = reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method,
                                                                              info->type);
       } else {
-        call_inst = OpMem(kOpBlx, TargetRefReg(kArg0),
+        call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef),
                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
       }
     } else {
```
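Behind these call sites, the "return a pair when wide" behavior described in the commit message is implemented by the TargetReg(symbolic_reg, WideKind) overloads, whose definitions are outside this diff. The following is a hypothetical reconstruction of how the generic 32-bit dispatch could look; the real Mir2Lir version (and the x86-64 override that returns solo 64-bit registers) differs in its checks and register numbering.

```cpp
// Hypothetical reconstruction of the generic wide dispatch; a model only,
// not the real mir_to_lir.h code.
enum SpecialTargetRegister { kArg0, kArg1, kArg2, kArg3 };
enum WideKind { kNotWide, kWide, kRef };

struct RegStorage {
  int low;
  int high;  // -1 when this is not a register pair
  static RegStorage MakeRegPair(RegStorage lo, RegStorage hi) {
    return RegStorage{lo.low, hi.low};
  }
};

struct Mir2LirSketch {
  // Narrow lookup: symbolic name -> physical register (target-specific).
  RegStorage TargetReg(SpecialTargetRegister symbolic_reg) {
    return RegStorage{static_cast<int>(symbolic_reg), -1};
  }

  // On a 32-bit backend, a kWide request pairs the register with its
  // neighbor (kArg0+kArg1, kArg2+kArg3, ...); a 64-bit backend such as
  // x86-64 would override this to return the solo 64-bit register instead.
  RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) {
    if (wide_kind == kWide) {
      RegStorage low = TargetReg(symbolic_reg);
      RegStorage high = TargetReg(static_cast<SpecialTargetRegister>(symbolic_reg + 1));
      return RegStorage::MakeRegPair(low, high);
    }
    return TargetReg(symbolic_reg);  // kNotWide and kRef collapse on 32-bit
  }
};
```

The x86 specialization mentioned in the commit message is why this dispatch is overridable in the real tree: a 64-bit backend can answer the same kWide request with a single solo register rather than a pair.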