Diffstat (limited to 'compiler/dex/quick')
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc   |   2
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h |   8
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc |   2
-rw-r--r--  compiler/dex/quick/codegen_util.cc       |   4
-rw-r--r--  compiler/dex/quick/gen_common.cc         | 163
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc         | 339
-rw-r--r--  compiler/dex/quick/mir_to_lir.h          |  45
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc       |   2
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h     |  25
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc        |  60
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc     |  49
11 files changed, 350 insertions, 349 deletions
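
The core of this patch is visible in the mir_to_lir.h, codegen_arm64.h and codegen_x86.h hunks below: the old TargetReg(reg, bool is_wide) overload, the two-register pair overload, and TargetRefReg() are folded into a single TargetReg(reg, WideKind), which returns a solo register for kNotWide (and for kRef on 32-bit targets) and a pair of consecutive argument/return registers for kWide on 32-bit targets. The following standalone sketch uses simplified stand-in types and is not code from the patch; it only illustrates that dispatch under those assumptions.

#include <cassert>

enum WideKind { kNotWide, kWide, kRef };
enum SpecialTargetRegister { kArg0, kArg1, kArg2, kArg3, kRet0, kRet1 };

struct RegStorage {
  int low;
  int high;  // -1 when the storage is a single (solo) register
  bool IsPair() const { return high != -1; }
};

// Stand-ins for the per-target solo-register lookup and the pair constructor
// (the real code uses Mir2Lir::TargetReg(reg) and RegStorage::MakeRegPair).
RegStorage Solo(SpecialTargetRegister r) { return RegStorage{static_cast<int>(r), -1}; }
RegStorage MakePair(RegStorage lo, RegStorage hi) { return RegStorage{lo.low, hi.low}; }

RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
  if (wide_kind == kWide) {
    // Wide views only make sense for argument/return registers, which are laid
    // out consecutively, so the pair is (reg, reg + 1) with reg as the low half.
    assert(reg == kArg0 || reg == kArg1 || reg == kArg2 || reg == kRet0);
    return MakePair(Solo(reg), Solo(static_cast<SpecialTargetRegister>(reg + 1)));
  }
  return Solo(reg);  // kNotWide and kRef resolve to a single register on a 32-bit target.
}

int main() {
  assert(TargetReg(kArg0, kWide).IsPair());   // kArg0/kArg1 pair
  assert(!TargetReg(kArg1, kRef).IsPair());   // single register
  return 0;
}
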
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc index d24f419b09..8117c62954 100644 --- a/compiler/dex/quick/arm64/call_arm64.cc +++ b/compiler/dex/quick/arm64/call_arm64.cc @@ -195,7 +195,7 @@ void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { // TUNING: How much performance we get when we inline this? // Since we've already flush all register. FlushAllRegs(); - LoadValueDirectFixed(rl_src, rs_x0); // = TargetRefReg(kArg0) + LoadValueDirectFixed(rl_src, rs_x0); // = TargetReg(kArg0, kRef) LockCallTemps(); // Prepare for explicit register usage LIR* null_check_branch = nullptr; if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) { diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h index 7d75da91d8..81ba8bc3b1 100644 --- a/compiler/dex/quick/arm64/codegen_arm64.h +++ b/compiler/dex/quick/arm64/codegen_arm64.h @@ -100,18 +100,14 @@ class Arm64Mir2Lir FINAL : public Mir2Lir { // Required for target - register utilities. RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE; - RegStorage TargetReg(SpecialTargetRegister symbolic_reg, bool is_wide) OVERRIDE { + RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE { RegStorage reg = TargetReg(symbolic_reg); - if (is_wide) { + if (wide_kind == kWide || wide_kind == kRef) { return (reg.Is64Bit()) ? reg : As64BitReg(reg); } else { return (reg.Is32Bit()) ? reg : As32BitReg(reg); } } - RegStorage TargetRefReg(SpecialTargetRegister symbolic_reg) OVERRIDE { - RegStorage reg = TargetReg(symbolic_reg); - return (reg.Is64Bit() ? reg : As64BitReg(reg)); - } RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE { RegStorage reg = TargetReg(symbolic_reg); return (reg.Is64Bit() ? reg : As64BitReg(reg)); diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc index 8264a064f7..2212380eb4 100644 --- a/compiler/dex/quick/arm64/target_arm64.cc +++ b/compiler/dex/quick/arm64/target_arm64.cc @@ -1111,7 +1111,7 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Instead of allocating a new temp, simply reuse one of the registers being used // for argument passing. - RegStorage temp = TargetReg(kArg3, false); + RegStorage temp = TargetReg(kArg3, kNotWide); // Now load the argument VR and store to the outs. Load32Disp(TargetReg(kSp), current_src_offset, temp); diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 048aca3735..60d25890d4 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -1212,7 +1212,7 @@ void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType data_target->operands[2] = type; } // Loads an ArtMethod pointer, which is a reference as it lives in the heap. - LIR* load_pc_rel = OpPcRelLoad(TargetRefReg(symbolic_reg), data_target); + LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target); AppendLIR(load_pc_rel); DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target); } @@ -1224,7 +1224,7 @@ void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_re data_target = AddWordData(&class_literal_list_, type_idx); } // Loads a Class pointer, which is a reference as it lives in the heap. 
- LIR* load_pc_rel = OpPcRelLoad(TargetRefReg(symbolic_reg), data_target); + LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target); AppendLIR(load_pc_rel); } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index c266a3c2e9..1fc0cff678 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -127,8 +127,8 @@ void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) { m2l_->ResetDefTracking(); GenerateTargetLabel(kPseudoThrowTarget); - RegStorage arg1_32 = m2l_->TargetReg(kArg1, false); - RegStorage arg0_32 = m2l_->TargetReg(kArg0, false); + RegStorage arg1_32 = m2l_->TargetReg(kArg1, kNotWide); + RegStorage arg0_32 = m2l_->TargetReg(kArg0, kNotWide); m2l_->OpRegCopy(arg1_32, length_); m2l_->LoadConstant(arg0_32, index_); @@ -368,7 +368,8 @@ static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, if (!use_direct_type_ptr) { mir_to_lir->LoadClassType(type_idx, kArg0); func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved); - mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0, false), + mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, + mir_to_lir->TargetReg(kArg0, kNotWide), rl_src, true); } else { // Use the direct pointer. @@ -431,8 +432,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { } else { GenFilledNewArrayCall<4>(this, cu_, elems, type_idx); } - FreeTemp(TargetReg(kArg2, false)); - FreeTemp(TargetReg(kArg1, false)); + FreeTemp(TargetReg(kArg2, kNotWide)); + FreeTemp(TargetReg(kArg1, kNotWide)); /* * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the * return region. Because AllocFromCode placed the new array @@ -440,7 +441,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { * added, it may be necessary to additionally copy all return * values to a home location in thread-local storage */ - RegStorage ref_reg = TargetRefReg(kRet0); + RegStorage ref_reg = TargetReg(kRet0, kRef); LockTemp(ref_reg); // TODO: use the correct component size, currently all supported types @@ -477,7 +478,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { switch (cu_->instruction_set) { case kThumb2: case kArm64: - r_val = TargetReg(kLr, false); + r_val = TargetReg(kLr, kNotWide); break; case kX86: case kX86_64: @@ -553,7 +554,7 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath { storage_index_, true); } // Copy helper's result into r_base, a no-op on all but MIPS. - m2l_->OpRegCopy(r_base_, m2l_->TargetRefReg(kRet0)); + m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef)); m2l_->OpUnconditionalBranch(cont_); } @@ -601,10 +602,10 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, // May do runtime call so everything to home locations. FlushAllRegs(); // Using fixed register to sync with possible call to runtime support. - RegStorage r_method = TargetRefReg(kArg1); + RegStorage r_method = TargetReg(kArg1, kRef); LockTemp(r_method); LoadCurrMethodDirect(r_method); - r_base = TargetRefReg(kArg0); + r_base = TargetReg(kArg0, kRef); LockTemp(r_base); LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, kNotVolatile); @@ -618,7 +619,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double, // The slow path is invoked if the r_base is NULL or the class pointed // to by it is not initialized. 
LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); - RegStorage r_tmp = TargetReg(kArg2, false); + RegStorage r_tmp = TargetReg(kArg2, kNotWide); LockTemp(r_tmp); LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, mirror::Class::StatusOffset().Int32Value(), @@ -698,10 +699,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, // May do runtime call so everything to home locations. FlushAllRegs(); // Using fixed register to sync with possible call to runtime support. - RegStorage r_method = TargetRefReg(kArg1); + RegStorage r_method = TargetReg(kArg1, kRef); LockTemp(r_method); LoadCurrMethodDirect(r_method); - r_base = TargetRefReg(kArg0); + r_base = TargetReg(kArg0, kRef); LockTemp(r_base); LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), r_base, kNotVolatile); @@ -715,7 +716,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, // The slow path is invoked if the r_base is NULL or the class pointed // to by it is not initialized. LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL); - RegStorage r_tmp = TargetReg(kArg2, false); + RegStorage r_tmp = TargetReg(kArg2, kNotWide); LockTemp(r_tmp); LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base, mirror::Class::StatusOffset().Int32Value(), @@ -961,7 +962,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_, rl_method_.reg, true); } - m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetRefReg(kRet0)); + m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0, kRef)); m2l_->OpUnconditionalBranch(cont_); } @@ -1001,15 +1002,15 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { DCHECK(!IsTemp(rl_method.reg)); r_method = rl_method.reg; } else { - r_method = TargetRefReg(kArg2); + r_method = TargetReg(kArg2, kRef); LoadCurrMethodDirect(r_method); } LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), - TargetRefReg(kArg0), kNotVolatile); + TargetReg(kArg0, kRef), kNotVolatile); // Might call out to helper, which will return resolved string in kRet0 - LoadRefDisp(TargetRefReg(kArg0), offset_of_string, TargetRefReg(kRet0), kNotVolatile); - LIR* fromfast = OpCmpImmBranch(kCondEq, TargetRefReg(kRet0), 0, NULL); + LoadRefDisp(TargetReg(kArg0, kRef), offset_of_string, TargetReg(kRet0, kRef), kNotVolatile); + LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0, kRef), 0, NULL); LIR* cont = NewLIR0(kPseudoTargetLabel); { @@ -1078,10 +1079,12 @@ static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_ mir_to_lir->LoadClassType(type_idx, kArg0); if (!is_type_initialized) { func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved); - mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0), true); + mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef), + true); } else { func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized); - mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0), true); + mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef), + true); } } else { // Use the direct pointer. 
@@ -1200,9 +1203,9 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know FlushAllRegs(); // May generate a call - use explicit registers LockCallTemps(); - RegStorage method_reg = TargetRefReg(kArg1); + RegStorage method_reg = TargetReg(kArg1, kRef); LoadCurrMethodDirect(method_reg); // kArg1 <= current Method* - RegStorage class_reg = TargetRefReg(kArg2); // kArg2 will hold the Class* + RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class* if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kArg0 @@ -1213,15 +1216,15 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess), type_idx, true); } - OpRegCopy(class_reg, TargetRefReg(kRet0)); // Align usage with fast path - LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); // kArg0 <= ref + OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path + LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref } else if (use_declaring_class) { - LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); // kArg0 <= ref + LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg, kNotVolatile); } else { // Load dex cache entry into class_reg (kArg2) - LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); // kArg0 <= ref + LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg, kNotVolatile); int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value(); @@ -1236,8 +1239,8 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know } else { CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true); } - OpRegCopy(TargetRefReg(kArg2), TargetRefReg(kRet0)); // Align usage with fast path - LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); /* reload Ref */ + OpRegCopy(TargetReg(kArg2, kRef), TargetReg(kRet0, kRef)); // Align usage with fast path + LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); /* reload Ref */ // Rejoin code paths LIR* hop_target = NewLIR0(kPseudoTargetLabel); hop_branch->target = hop_target; @@ -1249,25 +1252,25 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know // On MIPS rArg0 != rl_result, place false in result if branch is taken. LoadConstant(rl_result.reg, 0); } - LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, NULL); + LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, NULL); /* load object->klass_ */ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); - LoadRefDisp(TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetRefReg(kArg1), - kNotVolatile); + LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(), + TargetReg(kArg1, kRef), kNotVolatile); /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */ LIR* branchover = NULL; if (type_known_final) { // rl_result == ref == null == 0. if (cu_->instruction_set == kThumb2) { - OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2)); // Same? + OpRegReg(kOpCmp, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef)); // Same? 
LIR* it = OpIT(kCondEq, "E"); // if-convert the test LoadConstant(rl_result.reg, 1); // .eq case - load true LoadConstant(rl_result.reg, 0); // .ne case - load false OpEndIT(it); } else { LoadConstant(rl_result.reg, 0); // ne case - load false - branchover = OpCmpBranch(kCondNe, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL); + branchover = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL); LoadConstant(rl_result.reg, 1); // eq case - load true } } else { @@ -1278,11 +1281,11 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LIR* it = nullptr; if (!type_known_abstract) { /* Uses conditional nullification */ - OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2)); // Same? + OpRegReg(kOpCmp, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef)); // Same? it = OpIT(kCondEq, "EE"); // if-convert the test - LoadConstant(TargetReg(kArg0, false), 1); // .eq case - load true + LoadConstant(TargetReg(kArg0, kNotWide), 1); // .eq case - load true } - OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2)); // .ne case - arg0 <= class + OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) if (it != nullptr) { OpEndIT(it); @@ -1292,12 +1295,12 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (!type_known_abstract) { /* Uses branchovers */ LoadConstant(rl_result.reg, 1); // assume true - branchover = OpCmpBranch(kCondEq, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL); + branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL); } RegStorage r_tgt = cu_->target64 ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) : LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial)); - OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2)); // .ne case - arg0 <= class + OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) FreeTemp(r_tgt); } @@ -1351,9 +1354,9 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ FlushAllRegs(); // May generate a call - use explicit registers LockCallTemps(); - RegStorage method_reg = TargetRefReg(kArg1); + RegStorage method_reg = TargetReg(kArg1, kRef); LoadCurrMethodDirect(method_reg); // kArg1 <= current Method* - RegStorage class_reg = TargetRefReg(kArg2); // kArg2 will hold the Class* + RegStorage class_reg = TargetReg(kArg2, kRef); // kArg2 will hold the Class* if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kRet0 @@ -1365,7 +1368,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess), type_idx, true); } - OpRegCopy(class_reg, TargetRefReg(kRet0)); // Align usage with fast path + OpRegCopy(class_reg, TargetReg(kRet0, kRef)); // Align usage with fast path } else if (use_declaring_class) { LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), class_reg, kNotVolatile); @@ -1396,12 +1399,12 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // InitializeTypeFromCode(idx, method) if (m2l_->cu_->target64) { m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_, - m2l_->TargetRefReg(kArg1), true); + m2l_->TargetReg(kArg1, kRef), true); } else { 
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_, - m2l_->TargetRefReg(kArg1), true); + m2l_->TargetReg(kArg1, kRef), true); } - m2l_->OpRegCopy(class_reg_, m2l_->TargetRefReg(kRet0)); // Align usage with fast path + m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0, kRef)); // Align usage with fast path m2l_->OpUnconditionalBranch(cont_); } @@ -1414,7 +1417,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ } } // At this point, class_reg (kArg2) has class - LoadValueDirectFixed(rl_src, TargetRefReg(kArg0)); // kArg0 <= ref + LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef)); // kArg0 <= ref // Slow path for the case where the classes are not equal. In this case we need // to call a helper function to do the check. @@ -1428,15 +1431,17 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ GenerateTargetLabel(); if (load_) { - m2l_->LoadRefDisp(m2l_->TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(), - m2l_->TargetRefReg(kArg1), kNotVolatile); + m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(), + m2l_->TargetReg(kArg1, kRef), kNotVolatile); } if (m2l_->cu_->target64) { - m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetRefReg(kArg2), - m2l_->TargetRefReg(kArg1), true); + m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), + m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef), + true); } else { - m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetRefReg(kArg2), - m2l_->TargetRefReg(kArg1), true); + m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), + m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef), + true); } m2l_->OpUnconditionalBranch(cont_); @@ -1448,7 +1453,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ if (type_known_abstract) { // Easier case, run slow path if target is non-null (slow path will load from target) - LIR* branch = OpCmpImmBranch(kCondNe, TargetRefReg(kArg0), 0, nullptr); + LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0, kRef), 0, nullptr); LIR* cont = NewLIR0(kPseudoTargetLabel); AddSlowPath(new (arena_) SlowPath(this, branch, cont, true)); } else { @@ -1457,13 +1462,13 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // slow path if the classes are not equal. /* Null is OK - continue */ - LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, nullptr); + LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, nullptr); /* load object->klass_ */ DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0); - LoadRefDisp(TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(), - TargetRefReg(kArg1), kNotVolatile); + LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(), + TargetReg(kArg1, kRef), kNotVolatile); - LIR* branch2 = OpCmpBranch(kCondNe, TargetRefReg(kArg1), class_reg, nullptr); + LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), class_reg, nullptr); LIR* cont = NewLIR0(kPseudoTargetLabel); // Add the slow path that will not perform load since this is already done. @@ -1486,8 +1491,8 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des * lr is used explicitly elsewhere in the code generator and cannot * normally be used as a general temp register. 
*/ - MarkTemp(TargetReg(kLr)); // Add lr to the temp pool - FreeTemp(TargetReg(kLr)); // and make it available + MarkTemp(TargetReg(kLr, kNotWide)); // Add lr to the temp pool + FreeTemp(TargetReg(kLr, kNotWide)); // and make it available } rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); @@ -1514,8 +1519,8 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des FreeRegLocTemps(rl_result, rl_src2); StoreValueWide(rl_dest, rl_result); if (cu_->instruction_set == kThumb2) { - Clobber(TargetReg(kLr)); - UnmarkTemp(TargetReg(kLr)); // Remove lr from the temp pool + Clobber(TargetReg(kLr, kNotWide)); + UnmarkTemp(TargetReg(kLr, kNotWide)); // Remove lr from the temp pool } } @@ -1679,13 +1684,13 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, // If we haven't already generated the code use the callout function. if (!done) { FlushAllRegs(); /* Send everything to home location */ - LoadValueDirectFixed(rl_src2, TargetReg(kArg1, false)); + LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide)); RegStorage r_tgt = cu_->target64 ? CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) : CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod)); - LoadValueDirectFixed(rl_src1, TargetReg(kArg0, false)); + LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide)); if (check_zero) { - GenDivZeroCheck(TargetReg(kArg1, false)); + GenDivZeroCheck(TargetReg(kArg1, kNotWide)); } // NOTE: callout here is not a safepoint. if (cu_->target64) { @@ -1949,14 +1954,14 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re if (!done) { FlushAllRegs(); /* Everything to home location. */ - LoadValueDirectFixed(rl_src, TargetReg(kArg0, false)); - Clobber(TargetReg(kArg0, false)); + LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide)); + Clobber(TargetReg(kArg0, kNotWide)); if (cu_->target64) { - CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, false), lit, - false); + CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, kNotWide), + lit, false); } else { - CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, false), lit, - false); + CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, kNotWide), + lit, false); } if (is_div) rl_result = GetReturn(kCoreReg); @@ -1989,7 +1994,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc bool call_out = false; bool check_zero = false; ThreadOffset<pointer_size> func_offset(-1); - int ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg(); + int ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg(); switch (opcode) { case Instruction::NOT_LONG: @@ -2037,7 +2042,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc return; } else { call_out = true; - ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg(); + ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg(); func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul); } break; @@ -2049,7 +2054,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc } call_out = true; check_zero = true; - ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg(); + ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg(); func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv); break; case Instruction::REM_LONG: @@ -2062,8 +2067,8 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc 
check_zero = true; func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod); /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */ - ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2, false).GetReg() : - mir_to_lir->TargetReg(kRet0, false).GetReg(); + ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2, kNotWide).GetReg() : + mir_to_lir->TargetReg(kRet0, kNotWide).GetReg(); break; case Instruction::AND_LONG_2ADDR: case Instruction::AND_LONG: @@ -2106,11 +2111,11 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc } else { mir_to_lir->FlushAllRegs(); /* Send everything to home location */ if (check_zero) { - RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kArg1); - RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kArg3); + RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kWide); + RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kWide); mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2); RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset); - mir_to_lir->GenDivZeroCheckWide(mir_to_lir->TargetReg(kArg2, kArg3)); + mir_to_lir->GenDivZeroCheckWide(r_tmp2); mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1); // NOTE: callout here is not a safepoint mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */); @@ -2118,7 +2123,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false); } // Adjust return regs in to handle case of rem returning kArg2/kArg3 - if (ret_reg == mir_to_lir->TargetReg(kRet0, false).GetReg()) + if (ret_reg == mir_to_lir->TargetReg(kRet0, kNotWide).GetReg()) rl_result = mir_to_lir->GetReturnWide(kCoreReg); else rl_result = mir_to_lir->GetReturnWideAlt(); diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 3a304304e9..367e07bb81 100755 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -131,9 +131,10 @@ void Mir2Lir::CallRuntimeHelper(ThreadOffset<pointer_size> helper_offset, bool s INSTANTIATE(void Mir2Lir::CallRuntimeHelper, bool safepoint_pc) template <size_t pointer_size> -void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperImm(ThreadOffset<pointer_size> helper_offset, int arg0, + bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadConstant(TargetReg(kArg0, false), arg0); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -143,7 +144,7 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperReg(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); + OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -156,13 +157,7 @@ void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset<pointer_size> helper_off if (arg0.wide == 0) { LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0)); } else { - RegStorage r_tmp; - if (cu_->target64) { - r_tmp = TargetReg(kArg0, true); - } else { - r_tmp = TargetReg(arg0.fp ? kFArg0 : kArg0, arg0.fp ? kFArg1 : kArg1); - } - LoadValueDirectWideFixed(arg0, r_tmp); + LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? 
kFArg0 : kArg0, kWide)); } ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); @@ -173,8 +168,8 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadConstant(TargetReg(kArg0, false), arg0); - LoadConstant(TargetReg(kArg1, false), arg1); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); + LoadConstant(TargetReg(kArg1, kNotWide), arg1); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -184,23 +179,14 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset<pointer_size> helper_offset, int arg0, RegLocation arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); + DCHECK(!arg1.fp); if (arg1.wide == 0) { LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1)); } else { - RegStorage r_tmp; - if (cu_->target64) { - r_tmp = TargetReg(kArg1, true); - } else { - if (cu_->instruction_set == kMips) { - // skip kArg1 for stack alignment. - r_tmp = TargetReg(kArg2, kArg3); - } else { - r_tmp = TargetReg(kArg1, kArg2); - } - } + RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide); LoadValueDirectWideFixed(arg1, r_tmp); } - LoadConstant(TargetReg(kArg0, false), arg0); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -213,7 +199,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset<pointer_size> helper_ RegStorage r_tgt = CallHelperSetup(helper_offset); DCHECK(!arg0.wide); LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0)); - LoadConstant(TargetReg(kArg1, false), arg1); + LoadConstant(TargetReg(kArg1, kNotWide), arg1); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -224,8 +210,8 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset<pointer_size> helper_offset, int arg0, RegStorage arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1); - LoadConstant(TargetReg(kArg0, false), arg0); + OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -235,8 +221,8 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, int arg1, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); - LoadConstant(TargetReg(kArg1, false), arg1); + OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0); + LoadConstant(TargetReg(kArg1, kNotWide), arg1); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -246,8 +232,8 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset<pointer_size> helper_offset, int arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadCurrMethodDirect(TargetRefReg(kArg1)); - LoadConstant(TargetReg(kArg0, false), arg0); + LoadCurrMethodDirect(TargetReg(kArg1, kRef)); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -257,11 +243,12 @@ template <size_t pointer_size> void 
Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset<pointer_size> helper_offset, RegStorage arg0, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0)); - if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) { - OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); + DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0)); + RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind()); + if (r_tmp.NotExactlyEquals(arg0)) { + OpRegCopy(r_tmp, arg0); } - LoadCurrMethodDirect(TargetRefReg(kArg1)); + LoadCurrMethodDirect(TargetReg(kArg1, kRef)); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -272,11 +259,12 @@ void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset<pointer_size> h RegStorage arg0, RegLocation arg2, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - DCHECK(!IsSameReg(TargetReg(kArg1, arg0.Is64Bit()), arg0)); - if (TargetReg(kArg0, arg0.Is64Bit()).NotExactlyEquals(arg0)) { - OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); + DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0)); + RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind()); + if (r_tmp.NotExactlyEquals(arg0)) { + OpRegCopy(r_tmp, arg0); } - LoadCurrMethodDirect(TargetRefReg(kArg1)); + LoadCurrMethodDirect(TargetReg(kArg1, kRef)); LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2)); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); @@ -313,47 +301,26 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset<pointer_size> } else { DCHECK(!cu_->target64); if (arg0.wide == 0) { - LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0, false) : TargetReg(kArg0, false)); + LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide)); if (arg1.wide == 0) { if (cu_->instruction_set == kMips) { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2, false) : TargetReg(kArg1, false)); + LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg1, kNotWide)); } else { - LoadValueDirectFixed(arg1, TargetReg(kArg1, false)); + LoadValueDirectFixed(arg1, TargetReg(kArg1, kNotWide)); } } else { if (cu_->instruction_set == kMips) { - RegStorage r_tmp; - if (arg1.fp) { - r_tmp = TargetReg(kFArg2, kFArg3); - } else { - // skip kArg1 for stack alignment. - r_tmp = TargetReg(kArg2, kArg3); - } - LoadValueDirectWideFixed(arg1, r_tmp); + LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide)); } else { - RegStorage r_tmp; - r_tmp = TargetReg(kArg1, kArg2); - LoadValueDirectWideFixed(arg1, r_tmp); + LoadValueDirectWideFixed(arg1, TargetReg(kArg1, kWide)); } } } else { - RegStorage r_tmp; - if (arg0.fp) { - r_tmp = TargetReg(kFArg0, kFArg1); - } else { - r_tmp = TargetReg(kArg0, kArg1); - } - LoadValueDirectWideFixed(arg0, r_tmp); + LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide)); if (arg1.wide == 0) { - LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2, false) : TargetReg(kArg2, false)); + LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide)); } else { - RegStorage r_tmp; - if (arg1.fp) { - r_tmp = TargetReg(kFArg2, kFArg3); - } else { - r_tmp = TargetReg(kArg2, kArg3); - } - LoadValueDirectWideFixed(arg1, r_tmp); + LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? 
kFArg2 : kArg2, kWide)); } } } @@ -364,19 +331,21 @@ INSTANTIATE(void Mir2Lir::CallRuntimeHelperRegLocationRegLocation, RegLocation a RegLocation arg1, bool safepoint_pc) void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) { - if (IsSameReg(arg1, TargetReg(kArg0, arg1.Is64Bit()))) { - if (IsSameReg(arg0, TargetReg(kArg1, arg0.Is64Bit()))) { + WideKind arg0_kind = arg0.GetWideKind(); + WideKind arg1_kind = arg1.GetWideKind(); + if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) { + if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) { // Swap kArg0 and kArg1 with kArg2 as temp. - OpRegCopy(TargetReg(kArg2, arg1.Is64Bit()), arg1); - OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); - OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), TargetReg(kArg2, arg1.Is64Bit())); + OpRegCopy(TargetReg(kArg2, arg1_kind), arg1); + OpRegCopy(TargetReg(kArg0, arg0_kind), arg0); + OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind)); } else { - OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1); - OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); + OpRegCopy(TargetReg(kArg1, arg1_kind), arg1); + OpRegCopy(TargetReg(kArg0, arg0_kind), arg0); } } else { - OpRegCopy(TargetReg(kArg0, arg0.Is64Bit()), arg0); - OpRegCopy(TargetReg(kArg1, arg1.Is64Bit()), arg1); + OpRegCopy(TargetReg(kArg0, arg0_kind), arg0); + OpRegCopy(TargetReg(kArg1, arg1_kind), arg1); } } @@ -396,7 +365,7 @@ void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset<pointer_size> helper_offse RegStorage arg1, int arg2, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); CopyToArgumentRegs(arg0, arg1); - LoadConstant(TargetReg(kArg2, false), arg2); + LoadConstant(TargetReg(kArg2, kNotWide), arg2); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -408,8 +377,8 @@ void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset<pointer_size> h int arg0, RegLocation arg2, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2)); - LoadCurrMethodDirect(TargetRefReg(kArg1)); - LoadConstant(TargetReg(kArg0, false), arg0); + LoadCurrMethodDirect(TargetReg(kArg1, kRef)); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -420,9 +389,9 @@ template <size_t pointer_size> void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset<pointer_size> helper_offset, int arg0, int arg2, bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); - LoadCurrMethodDirect(TargetRefReg(kArg1)); - LoadConstant(TargetReg(kArg2, false), arg2); - LoadConstant(TargetReg(kArg0, false), arg0); + LoadCurrMethodDirect(TargetReg(kArg1, kRef)); + LoadConstant(TargetReg(kArg2, kNotWide), arg2); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -439,15 +408,9 @@ void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset<pointer_si if (arg2.wide == 0) { LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2)); } else { - RegStorage r_tmp; - if (cu_->target64) { - r_tmp = TargetReg(kArg2, true); - } else { - r_tmp = TargetReg(kArg2, kArg3); - } - LoadValueDirectWideFixed(arg2, r_tmp); + LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide)); } - LoadConstant(TargetReg(kArg0, false), arg0); + LoadConstant(TargetReg(kArg0, kNotWide), arg0); ClobberCallerSave(); CallHelper<pointer_size>(r_tgt, helper_offset, safepoint_pc); } @@ -455,10 +418,12 @@ 
INSTANTIATE(void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation, int arg0, RegLocation arg2, bool safepoint_pc) template <size_t pointer_size> -void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset<pointer_size> helper_offset, - RegLocation arg0, RegLocation arg1, - RegLocation arg2, - bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation( + ThreadOffset<pointer_size> helper_offset, + RegLocation arg0, + RegLocation arg1, + RegLocation arg2, + bool safepoint_pc) { RegStorage r_tgt = CallHelperSetup(helper_offset); LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0)); LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1)); @@ -485,7 +450,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { */ RegLocation rl_src = rl_method; rl_src.location = kLocPhysReg; - rl_src.reg = TargetRefReg(kArg0); + rl_src.reg = TargetReg(kArg0, kRef); rl_src.home = false; MarkLive(rl_src); StoreValue(rl_method, rl_src); @@ -559,15 +524,44 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { } else { // If arriving in frame & promoted if (v_map->core_location == kLocPhysReg) { - Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->core_reg)); + Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), + RegStorage::Solo32(v_map->core_reg)); } if (v_map->fp_location == kLocPhysReg) { - Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg)); + Load32Disp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), + RegStorage::Solo32(v_map->fp_reg)); } } } } +static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) { + RegLocation rl_arg = info->args[0]; + cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef)); +} + +static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) { + cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags); + // get this->klass_ [use kArg1, set kArg0] + cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(), + cg->TargetReg(kArg0, kRef), + kNotVolatile); + cg->MarkPossibleNullPointerException(info->opt_flags); +} + +static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info, + const RegStorage* alt_from, + const CompilationUnit* cu, Mir2Lir* cg) { + if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { + // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt] + cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, + mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), + cg->TargetPtrReg(kInvokeTgt)); + return true; + } + return false; +} + /* * Bit of a hack here - in the absence of a real scheduling pass, * emit the next instruction in static & direct invoke sequences. 
@@ -589,7 +583,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, cg->LoadCodeAddress(target_method, type, kInvokeTgt); } if (direct_method != static_cast<uintptr_t>(-1)) { - cg->LoadConstant(cg->TargetRefReg(kArg0), direct_method); + cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method); } else { cg->LoadMethodAddress(target_method, type, kArg0); } @@ -598,7 +592,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, return -1; } } else { - RegStorage arg0_ref = cg->TargetRefReg(kArg0); + RegStorage arg0_ref = cg->TargetReg(kArg0, kRef); switch (state) { case 0: // Get the current Method* [sets kArg0] // TUNING: we can save a reg copy if Method* has been promoted. @@ -627,12 +621,11 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info, kNotVolatile); break; case 3: // Grab the code from the method* - if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { - if (direct_code == 0) { - cg->LoadWordDisp(arg0_ref, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), - cg->TargetPtrReg(kInvokeTgt)); + if (direct_code == 0) { + if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) { + break; // kInvokeTgt := arg0_ref->entrypoint } + } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { break; } // Intentional fallthrough for x86 @@ -660,34 +653,24 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info, * fully resolved at compile time. */ switch (state) { - case 0: { // Get "this" [set kArg1] - RegLocation rl_arg = info->args[0]; - cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1)); + case 0: + CommonCallCodeLoadThisIntoArg1(info, cg); // kArg1 := this break; - } - case 1: // Is "this" null? [use kArg1] - cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags); - // get this->klass_ [use kArg1, set kArg0] - cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(), - cg->TargetRefReg(kArg0), - kNotVolatile); - cg->MarkPossibleNullPointerException(info->opt_flags); + case 1: + CommonCallCodeLoadClassIntoArg0(info, cg); // kArg0 := kArg1->class + // Includes a null-check. 
break; case 2: { // Get this->klass_.embedded_vtable[method_idx] [usr kArg0, set kArg0] int32_t offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() + method_idx * sizeof(mirror::Class::VTableEntry); // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0] - cg->LoadRefDisp(cg->TargetRefReg(kArg0), offset, cg->TargetRefReg(kArg0), kNotVolatile); + cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile); break; } case 3: - if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { - // Get the compiled code address [use kArg0, set kInvokeTgt] - cg->LoadWordDisp(cg->TargetRefReg(kArg0), - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), - cg->TargetPtrReg(kInvokeTgt)); - break; + if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) { + break; // kInvokeTgt := kArg0->entrypoint } // Intentional fallthrough for X86 default: @@ -711,40 +694,28 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, switch (state) { case 0: // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)] CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds()); - cg->LoadConstant(cg->TargetReg(kHiddenArg, false), target_method.dex_method_index); + cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index); if (cu->instruction_set == kX86) { - cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, false), cg->TargetReg(kHiddenArg, false)); + cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide)); } break; - case 1: { // Get "this" [set kArg1] - RegLocation rl_arg = info->args[0]; - cg->LoadValueDirectFixed(rl_arg, cg->TargetRefReg(kArg1)); + case 1: + CommonCallCodeLoadThisIntoArg1(info, cg); // kArg1 := this break; - } - case 2: // Is "this" null? [use kArg1] - cg->GenNullCheck(cg->TargetRefReg(kArg1), info->opt_flags); - // Get this->klass_ [use kArg1, set kArg0] - cg->LoadRefDisp(cg->TargetRefReg(kArg1), mirror::Object::ClassOffset().Int32Value(), - cg->TargetRefReg(kArg0), - kNotVolatile); - cg->MarkPossibleNullPointerException(info->opt_flags); + case 2: + CommonCallCodeLoadClassIntoArg0(info, cg); // kArg0 := kArg1->class + // Includes a null-check. 
break; case 3: { // Get target method [use kInvokeTgt, set kArg0] int32_t offset = mirror::Class::EmbeddedImTableOffset().Uint32Value() + (method_idx % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry); // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0] - cg->LoadRefDisp(cg->TargetRefReg(kArg0), offset, - cg->TargetRefReg(kArg0), - kNotVolatile); + cg->LoadRefDisp(cg->TargetReg(kArg0, kRef), offset, cg->TargetReg(kArg0, kRef), kNotVolatile); break; } case 4: - if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { - // Get the compiled code address [use kArg0, set kInvokeTgt] - cg->LoadWordDisp(cg->TargetRefReg(kArg0), - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(), - cg->TargetPtrReg(kInvokeTgt)); - break; + if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) { + break; // kInvokeTgt := kArg0->entrypoint } // Intentional fallthrough for X86 default: @@ -754,9 +725,9 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, } template <size_t pointer_size> -static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<pointer_size> trampoline, - int state, const MethodReference& target_method, - uint32_t method_idx) { +static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, + ThreadOffset<pointer_size> trampoline, int state, + const MethodReference& target_method, uint32_t method_idx) { Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get()); /* * This handles the case in which the base method is not fully @@ -765,11 +736,12 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<po if (state == 0) { if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) { // Load trampoline target - cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(), cg->TargetPtrReg(kInvokeTgt)); + cg->LoadWordDisp(cg->TargetPtrReg(kSelf), trampoline.Int32Value(), + cg->TargetPtrReg(kInvokeTgt)); } // Load kArg0 with method index CHECK_EQ(cu->dex_file, target_method.dex_file); - cg->LoadConstant(cg->TargetReg(kArg0, false), target_method.dex_method_index); + cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index); return 1; } return -1; @@ -820,10 +792,12 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, uint32_t unused, uintptr_t unused2, uintptr_t unused3, InvokeType unused4) { if (cu->target64) { - ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeVirtualTrampolineWithAccessCheck); + ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, + pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0); } else { - ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, pInvokeVirtualTrampolineWithAccessCheck); + ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, + pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0); } } @@ -834,10 +808,12 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu, uint32_t unused, uintptr_t unused2, uintptr_t unused3, InvokeType unused4) { if (cu->target64) { - ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, pInvokeInterfaceTrampolineWithAccessCheck); + ThreadOffset<8> trampoline = QUICK_ENTRYPOINT_OFFSET(8, + pInvokeInterfaceTrampolineWithAccessCheck); return NextInvokeInsnSP<8>(cu, info, trampoline, state, target_method, 0); } else { - ThreadOffset<4> trampoline = 
QUICK_ENTRYPOINT_OFFSET(4, pInvokeInterfaceTrampolineWithAccessCheck); + ThreadOffset<4> trampoline = QUICK_ENTRYPOINT_OFFSET(4, + pInvokeInterfaceTrampolineWithAccessCheck); return NextInvokeInsnSP<4>(cu, info, trampoline, state, target_method, 0); } } @@ -848,7 +824,8 @@ int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state, uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method, InvokeType type, bool skip_this) { int last_arg_reg = 3 - 1; - int arg_regs[3] = {TargetReg(kArg1, false).GetReg(), TargetReg(kArg2, false).GetReg(), TargetReg(kArg3, false).GetReg()}; + int arg_regs[3] = {TargetReg(kArg1, kNotWide).GetReg(), TargetReg(kArg2, kNotWide).GetReg(), + TargetReg(kArg3, kNotWide).GetReg()}; int next_reg = 0; int next_arg = 0; @@ -923,7 +900,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, } } else { // kArg2 & rArg3 can safely be used here - reg = TargetReg(kArg3, false); + reg = TargetReg(kArg3, kNotWide); { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); Load32Disp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg); @@ -947,7 +924,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, if (rl_arg.location == kLocPhysReg) { arg_reg = rl_arg.reg; } else { - arg_reg = rl_arg.wide ? TargetReg(kArg2, kArg3) : TargetReg(kArg2, false); + arg_reg = TargetReg(kArg2, rl_arg.wide ? kWide : kNotWide); if (rl_arg.wide) { LoadValueDirectWideFixed(rl_arg, arg_reg); } else { @@ -978,13 +955,13 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, if (pcrLabel) { if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { - *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags); + *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } else { *pcrLabel = nullptr; // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. 
RegStorage tmp = AllocTemp(); - Load32Disp(TargetRefReg(kArg1), 0, tmp); + Load32Disp(TargetReg(kArg1, kRef), 0, tmp); MarkPossibleNullPointerException(info->opt_flags); FreeTemp(tmp); } @@ -1058,23 +1035,23 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Use vldm/vstm pair using kArg3 as a temp call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); - OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), start_offset); + OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), start_offset); LIR* ld = nullptr; { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - ld = OpVldm(TargetRefReg(kArg3), regs_left_to_pass_via_stack); + ld = OpVldm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack); } // TUNING: loosen barrier ld->u.m.def_mask = &kEncodeAll; call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); - OpRegRegImm(kOpAdd, TargetRefReg(kArg3), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4)); + OpRegRegImm(kOpAdd, TargetReg(kArg3, kRef), TargetPtrReg(kSp), 4 /* Method* */ + (3 * 4)); call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, direct_code, direct_method, type); LIR* st = nullptr; { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); - st = OpVstm(TargetRefReg(kArg3), regs_left_to_pass_via_stack); + st = OpVstm(TargetReg(kArg3, kRef), regs_left_to_pass_via_stack); } st->u.m.def_mask = &kEncodeAll; call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx, @@ -1148,7 +1125,8 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, if (ld2 != nullptr) { // For 64-bit load we can actually set up the aliasing information. AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true); - AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true); + AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, + true); } else { // Set barrier for 128-bit load. ld1->u.m.def_mask = &kEncodeAll; @@ -1158,7 +1136,8 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, if (st2 != nullptr) { // For 64-bit store we can actually set up the aliasing information. AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true); - AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true); + AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, + true); } else { // Set barrier for 128-bit store. st1->u.m.def_mask = &kEncodeAll; @@ -1173,7 +1152,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Instead of allocating a new temp, simply reuse one of the registers being used // for argument passing. - RegStorage temp = TargetReg(kArg3, false); + RegStorage temp = TargetReg(kArg3, kNotWide); // Now load the argument VR and store to the outs. 
Load32Disp(TargetPtrReg(kSp), current_src_offset, temp); @@ -1186,14 +1165,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, } } else { // Generate memcpy - OpRegRegImm(kOpAdd, TargetRefReg(kArg0), TargetPtrReg(kSp), outs_offset); - OpRegRegImm(kOpAdd, TargetRefReg(kArg1), TargetPtrReg(kSp), start_offset); + OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset); + OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset); if (cu_->target64) { - CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetRefReg(kArg0), - TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false); + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(8, pMemcpy), TargetReg(kArg0, kRef), + TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false); } else { - CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetRefReg(kArg0), - TargetRefReg(kArg1), (info->num_arg_words - 3) * 4, false); + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(4, pMemcpy), TargetReg(kArg0, kRef), + TargetReg(kArg1, kRef), (info->num_arg_words - 3) * 4, false); } } @@ -1205,13 +1184,13 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, direct_code, direct_method, type); if (pcrLabel) { if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) { - *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags); + *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags); } else { *pcrLabel = nullptr; // In lieu of generating a check for kArg1 being null, we need to // perform a load when doing implicit checks. RegStorage tmp = AllocTemp(); - Load32Disp(TargetRefReg(kArg1), 0, tmp); + Load32Disp(TargetReg(kArg1, kRef), 0, tmp); MarkPossibleNullPointerException(info->opt_flags); FreeTemp(tmp); } @@ -1550,9 +1529,9 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { ClobberCallerSave(); LockCallTemps(); // Using fixed registers - RegStorage reg_ptr = TargetRefReg(kArg0); - RegStorage reg_char = TargetReg(kArg1, false); - RegStorage reg_start = TargetReg(kArg2, false); + RegStorage reg_ptr = TargetReg(kArg0, kRef); + RegStorage reg_char = TargetReg(kArg1, kNotWide); + RegStorage reg_start = TargetReg(kArg2, kNotWide); LoadValueDirectFixed(rl_obj, reg_ptr); LoadValueDirectFixed(rl_char, reg_char); @@ -1594,8 +1573,8 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { } ClobberCallerSave(); LockCallTemps(); // Using fixed registers - RegStorage reg_this = TargetRefReg(kArg0); - RegStorage reg_cmp = TargetRefReg(kArg1); + RegStorage reg_this = TargetReg(kArg0, kRef); + RegStorage reg_cmp = TargetReg(kArg1, kRef); RegLocation rl_this = info->args[0]; RegLocation rl_cmp = info->args[1]; @@ -1877,7 +1856,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) { call_inst = reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type); } else { - call_inst = OpMem(kOpBlx, TargetRefReg(kArg0), + call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef), mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()); } } else { diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index d1e83c2230..13dd1886a0 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -1198,29 +1198,28 @@ class Mir2Lir : public Backend { /** * @brief Portable way of getting special registers from the backend. * @param reg Enumeration describing the purpose of the register. 
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index d1e83c2230..13dd1886a0 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1198,29 +1198,28 @@ class Mir2Lir : public Backend {
/**
* @brief Portable way of getting special registers from the backend.
* @param reg Enumeration describing the purpose of the register.
- * @param is_wide Whether the view should be 64-bit (rather than 32-bit).
+ * @param wide_kind What kind of view of the special register is required.
* @return Return the #RegStorage corresponding to the given purpose @p reg.
+ *
+ * Note: For 32b system, wide (kWide) views only make sense for the argument registers and the
+ *       return. In that case, this function should return a pair where the first component of
+ *       the result will be the indicated special register.
*/
- virtual RegStorage TargetReg(SpecialTargetRegister reg, bool is_wide) {
-   return TargetReg(reg);
- }
-
- /**
-  * @brief Portable way of getting special register pair from the backend.
-  * @param reg Enumeration describing the purpose of the first register.
-  * @param reg Enumeration describing the purpose of the second register.
-  * @return Return the #RegStorage corresponding to the given purpose @p reg.
-  */
- virtual RegStorage TargetReg(SpecialTargetRegister reg1, SpecialTargetRegister reg2) {
-   return RegStorage::MakeRegPair(TargetReg(reg1, false), TargetReg(reg2, false));
- }
-
- /**
-  * @brief Portable way of getting a special register for storing a reference.
-  * @see TargetReg()
-  */
- virtual RegStorage TargetRefReg(SpecialTargetRegister reg) {
-   return TargetReg(reg);
+ virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
+   if (wide_kind == kWide) {
+     DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg7) || (kRet0 == reg));
+     COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
+                    (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
+                    (kArg7 == kArg6 + 1), kargs_range_unexpected);
+     COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
+                    (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
+                    (kFArg7 == kFArg6 + 1), kfargs_range_unexpected);
+     COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
+     return RegStorage::MakeRegPair(TargetReg(reg),
+                                    TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
+   } else {
+     return TargetReg(reg);
+   }
}

/**
@@ -1234,9 +1233,9 @@ class Mir2Lir : public Backend {
// Get a reg storage corresponding to the wide & ref flags of the reg location.
virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
if (loc.ref) {
- return TargetRefReg(reg);
+ return TargetReg(reg, kRef);
} else {
- return TargetReg(reg, loc.wide);
+ return TargetReg(reg, loc.wide ? kWide : kNotWide);
}
}
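The new default implementation above is the heart of the change: kNotWide and kRef fall through to the plain single-register lookup, while kWide builds a pair of adjacent registers, which the COMPILE_ASSERTs guarantee is well-formed for the argument, float-argument and return registers. Illustrative calls against this base-class behavior (the WideKind enumerators are assumed to be defined alongside this interface):

    RegStorage ret_lo   = TargetReg(kRet0, kNotWide);  // Single 32-bit return register.
    RegStorage ret_wide = TargetReg(kRet0, kWide);     // Base class: the (kRet0, kRet1) pair.
    RegStorage arg_ref  = TargetReg(kArg1, kRef);      // Base class: same as kNotWide; 64-bit
                                                       // backends override this to widen it.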
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 8e2a1e3532..b7441d7649 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -151,7 +151,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
// Making a call - use explicit registers
FlushAllRegs();   /* Everything to home location */
- RegStorage array_ptr = TargetRefReg(kArg0);
+ RegStorage array_ptr = TargetReg(kArg0, kRef);
RegStorage payload = TargetPtrReg(kArg1);
RegStorage method_start = TargetPtrReg(kArg2);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index b0c54e86e9..dd1d2dc1c5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -89,19 +89,26 @@ class X86Mir2Lir : public Mir2Lir {
// Required for target - register utilities.
RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
RegStorage TargetReg32(SpecialTargetRegister reg);
- RegStorage TargetReg(SpecialTargetRegister symbolic_reg, bool is_wide) OVERRIDE {
-   RegStorage reg = TargetReg32(symbolic_reg);
-   if (is_wide) {
-     return (reg.Is64Bit()) ? reg : As64BitReg(reg);
+ RegStorage TargetReg(SpecialTargetRegister symbolic_reg, WideKind wide_kind) OVERRIDE {
+   if (wide_kind == kWide) {
+     if (cu_->target64) {
+       return As64BitReg(TargetReg32(symbolic_reg));
+     } else {
+       // x86: construct a pair.
+       DCHECK((kArg0 <= symbolic_reg && symbolic_reg < kArg3) ||
+              (kFArg0 <= symbolic_reg && symbolic_reg < kFArg3) ||
+              (kRet0 == symbolic_reg));
+       return RegStorage::MakeRegPair(TargetReg32(symbolic_reg),
+                                      TargetReg32(static_cast<SpecialTargetRegister>(symbolic_reg + 1)));
+     }
+   } else if (wide_kind == kRef && cu_->target64) {
+     return As64BitReg(TargetReg32(symbolic_reg));
  } else {
-     return (reg.Is32Bit()) ? reg : As32BitReg(reg);
+     return TargetReg32(symbolic_reg);
  }
}
- RegStorage TargetRefReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
-   return TargetReg(symbolic_reg, cu_->target64);
- }
RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
-   return TargetReg(symbolic_reg, cu_->target64);
+   return TargetReg(symbolic_reg, cu_->target64 ? kWide : kNotWide);
}
RegStorage GetArgMappingToPhysicalReg(int arg_num);
RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
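The x86 override differs from the portable default in two ways: on x86-64 both kWide and kRef widen the 32-bit name to its 64-bit counterpart, while on x86-32 kWide still has to build a pair of adjacent argument registers and kRef stays 32-bit. A sketch of the resulting invariants (illustrative only, not code from this change):

    if (cu_->target64) {
      DCHECK(TargetReg(kArg1, kWide).Is64Bit());   // Single 64-bit register.
      DCHECK(TargetReg(kArg1, kRef).Is64Bit());    // References are held in 64-bit registers.
    } else {
      DCHECK(TargetReg(kArg1, kWide).IsPair());    // Pair of adjacent 32-bit argument registers.
      DCHECK(TargetReg(kArg1, kRef).Is32Bit());    // 32-bit references on 32-bit x86.
    }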
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 1c63da40d3..2f27482e55 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -858,7 +858,8 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
RegLocation rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);
LoadValueDirectWide(rl_src_expected, rs_r0q);
- NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
+ NewLIR5(kX86LockCmpxchg64AR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
+         rl_new_value.reg.GetReg());
// After a store we need to insert barrier in case of potential load. Since the
// locked cmpxchg has full barrier semantics, only a scheduling barrier will be generated.
@@ -954,7 +955,8 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
rl_offset = LoadValue(rl_src_offset, kCoreReg);
}
LoadValueDirect(rl_src_expected, rs_r0);
- NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
+ NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0,
+         rl_new_value.reg.GetReg());
// After a store we need to insert barrier to prevent reordering with either
// earlier or later memory accesses. Since
@@ -1069,23 +1071,23 @@ void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index,
RegStorage new_index = index_;
// Move index out of kArg1, either directly to kArg0, or to kArg2.
// TODO: clean-up to check not a number but with type
- if (index_ == m2l_->TargetReg(kArg1, false)) {
-   if (array_base_ == m2l_->TargetRefReg(kArg0)) {
-     m2l_->OpRegCopy(m2l_->TargetReg(kArg2, false), index_);
-     new_index = m2l_->TargetReg(kArg2, false);
+ if (index_ == m2l_->TargetReg(kArg1, kNotWide)) {
+   if (array_base_ == m2l_->TargetReg(kArg0, kRef)) {
+     m2l_->OpRegCopy(m2l_->TargetReg(kArg2, kNotWide), index_);
+     new_index = m2l_->TargetReg(kArg2, kNotWide);
  } else {
-     m2l_->OpRegCopy(m2l_->TargetReg(kArg0, false), index_);
-     new_index = m2l_->TargetReg(kArg0, false);
+     m2l_->OpRegCopy(m2l_->TargetReg(kArg0, kNotWide), index_);
+     new_index = m2l_->TargetReg(kArg0, kNotWide);
  }
}
// Load array length to kArg1.
- m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
+ m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
if (cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- new_index, m2l_->TargetReg(kArg1, false), true);
+ new_index, m2l_->TargetReg(kArg1, kNotWide), true);
} else {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- new_index, m2l_->TargetReg(kArg1, false), true);
+ new_index, m2l_->TargetReg(kArg1, kNotWide), true);
}
}
@@ -1118,14 +1120,16 @@ void X86Mir2Lir::GenArrayBoundsCheck(int32_t index,
GenerateTargetLabel(kPseudoThrowTarget);
// Load array length to kArg1.
- m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
- m2l_->LoadConstant(m2l_->TargetReg(kArg0, false), index_);
+ m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, kNotWide), array_base_, len_offset_);
+ m2l_->LoadConstant(m2l_->TargetReg(kArg0, kNotWide), index_);
if (cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
+ m2l_->TargetReg(kArg0, kNotWide),
+ m2l_->TargetReg(kArg1, kNotWide), true);
} else {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
+ m2l_->TargetReg(kArg0, kNotWide),
+ m2l_->TargetReg(kArg1, kNotWide), true);
}
}
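In both bounds-check slow paths the shuffling exists because the array length is about to be loaded into kArg1 for the throw entrypoint; if the failing index already lives there it has to be parked in kArg0 or kArg2 first, and kNotWide is the right view since both values are 32-bit. The call everything is staged for is simply:

    m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
                                  new_index, m2l_->TargetReg(kArg1, kNotWide), true);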
@@ -1471,7 +1475,8 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(), r_base, displacement + LOWORD_OFFSET);
+ LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(),
+                    r_base, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2, true /* is_load */, true /* is64bit */);
if (!cu_->target64) {
@@ -2350,8 +2355,9 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
// If Method* is already in a register, we can save a copy.
RegLocation rl_method = mir_graph_->GetMethodLoc();
- int32_t offset_of_type = mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
-                          (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
+ int32_t offset_of_type = mirror::Array::DataOffset(
+     sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
+     (sizeof(mirror::HeapReference<mirror::Class*>) * type_idx);
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
@@ -2399,10 +2405,10 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
FlushAllRegs();
// May generate a call - use explicit registers.
LockCallTemps();
- RegStorage method_reg = TargetRefReg(kArg1);  // kArg1 gets current Method*.
+ RegStorage method_reg = TargetReg(kArg1, kRef);  // kArg1 gets current Method*.
LoadCurrMethodDirect(method_reg);
- RegStorage class_reg = TargetRefReg(kArg2);  // kArg2 will hold the Class*.
- RegStorage ref_reg = TargetRefReg(kArg0);  // kArg2 will hold the ref.
+ RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*.
+ RegStorage ref_reg = TargetReg(kArg0, kRef);  // kArg2 will hold the ref.
// Reference must end up in kArg0.
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
@@ -2414,7 +2420,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
type_idx, true);
}
- OpRegCopy(class_reg, TargetRefReg(kRet0));
+ OpRegCopy(class_reg, TargetReg(kRet0, kRef));
LoadValueDirectFixed(rl_src, ref_reg);
} else if (use_declaring_class) {
LoadValueDirectFixed(rl_src, ref_reg);
@@ -2438,7 +2444,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
} else {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
}
- OpRegCopy(class_reg, TargetRefReg(kRet0));  // Align usage with fast path.
+ OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path.
LoadValueDirectFixed(rl_src, ref_reg);  /* Reload Ref. */
// Rejoin code paths
LIR* hop_target = NewLIR0(kPseudoTargetLabel);
@@ -2459,7 +2465,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
// Is the class NULL?
LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
- RegStorage ref_class_reg = TargetRefReg(kArg1);  // kArg2 will hold the Class*.
+ RegStorage ref_class_reg = TargetReg(kArg1, kRef);  // kArg2 will hold the Class*.
/* Load object->klass_. */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg,
@@ -2477,7 +2483,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k
LoadConstant(rl_result.reg, 1);  // Assume result succeeds.
branchover = OpCmpBranch(kCondEq, ref_class_reg, class_reg, NULL);
}
- OpRegCopy(TargetRefReg(kArg0), class_reg);
+ OpRegCopy(TargetReg(kArg0, kRef), class_reg);
if (cu_->target64) {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
} else {
@@ -2617,7 +2623,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
} else {
if (shift_op) {
// X86 doesn't require masking and must use ECX.
- RegStorage t_reg = TargetReg(kCount, false);  // rCX
+ RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
LoadValueDirectFixed(rl_rhs, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
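Both shift hunks (here and in GenShiftOpLong just below) need the count in ECX because x86 only encodes variable shift amounts in CL; kCount names that fixed register and kNotWide selects its 32-bit view. A hedged sketch of the surrounding pattern, where OpRegReg with a shift OpKind is assumed to emit the CL-based form:

    RegStorage t_reg = TargetReg(kCount, kNotWide);  // ECX.
    LoadValueDirectFixed(rl_rhs, t_reg);             // Shift amount into ECX.
    OpRegReg(kOpLsl, rl_result.reg, t_reg);          // e.g. SHL r/m32, CL.
    FreeTemp(t_reg);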
@@ -2805,7 +2811,7 @@ void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
}

// X86 doesn't require masking and must use ECX.
- RegStorage t_reg = TargetReg(kCount, false);  // rCX
+ RegStorage t_reg = TargetReg(kCount, kNotWide);  // rCX
LoadValueDirectFixed(rl_shift, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 0083128501..bb1f379fe5 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -204,7 +204,8 @@ RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
case kSuspend: res_reg = RegStorage::InvalidReg(); break;
case kLr: res_reg = RegStorage::InvalidReg(); break;
case kPc: res_reg = RegStorage::InvalidReg(); break;
- case kSp: res_reg = rs_rX86_SP; break;
+ case kSp: res_reg = rs_rX86_SP_32; break;  // This must be the concrete one, as _SP is target-
+                                            // specific size.
case kArg0: res_reg = rs_rX86_ARG0; break;
case kArg1: res_reg = rs_rX86_ARG1; break;
case kArg2: res_reg = rs_rX86_ARG2; break;
@@ -968,7 +969,8 @@ void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeT
uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

// Generate the move instruction with the unique pointer and save index, dex_file, and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
+                    TargetReg(symbolic_reg, kNotWide).GetReg(),
static_cast<int>(target_method_id_ptr), target_method_idx,
WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(move);
@@ -985,7 +987,8 @@ void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic
uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

// Generate the move instruction with the unique pointer and save index and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
+                    TargetReg(symbolic_reg, kNotWide).GetReg(),
static_cast<int>(ptr), type_idx);
AppendLIR(move);
class_type_address_insns_.Insert(move);
@@ -1321,7 +1324,8 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
}
} else {
// Load the start index from stack, remembering that we pushed EDI.
- int displacement = SRegOffset(rl_start.s_reg_low) + (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
+ int displacement = SRegOffset(rl_start.s_reg_low) +
+                    (cu_->target64 ? 2 : 1) * sizeof(uint32_t);
{
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
Load32Disp(rs_rX86_SP, displacement, tmpReg);
}
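The adjusted displacement in the IndexOf hunk also records a subtle point: rl_start was spilled relative to the incoming stack pointer, and the intrinsic has since pushed EDI, which occupies one 32-bit slot on x86-32 but two on x86-64, hence the (cu_->target64 ? 2 : 1) factor. Equivalently (sketch only):

    int pushed_slots = cu_->target64 ? 2 : 1;  // An RDI push is 8 bytes, an EDI push is 4.
    int displacement = SRegOffset(rl_start.s_reg_low) + pushed_slots * sizeof(uint32_t);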
@@ -2241,21 +2245,24 @@ LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
}

// ------------ ABI support: mapping of args to physical registers -------------
-RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) {
+RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide,
+                                                              bool is_ref) {
const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
- const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
+ const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) /
+                                             sizeof(SpecialTargetRegister);
const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
-                                                           kFArg4, kFArg5, kFArg6, kFArg7};
- const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
+                                                           kFArg4, kFArg5, kFArg6, kFArg7};
+ const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) /
+                                           sizeof(SpecialTargetRegister);

if (is_double_or_float) {
if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
- return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
+ return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide ? kWide : kNotWide);
}
} else {
if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
- return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) :
-                 ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
+ return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++],
+                       is_ref ? kRef : (is_wide ? kWide : kNotWide));
}
}
return RegStorage::InvalidReg();
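With TargetRefReg gone, the ABI mapper expresses the same choice by folding its two flags into a single WideKind before asking the backend for a view; a reference wins over wideness, exactly as in the call above:

    WideKind kind = is_ref ? kRef : (is_wide ? kWide : kNotWide);
    RegStorage reg = ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], kind);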
@@ -2267,7 +2274,8 @@ RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

-void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count, InToRegStorageMapper* mapper) {
+void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
+                                                   InToRegStorageMapper* mapper) {
DCHECK(mapper != nullptr);
max_mapped_in_ = -1;
is_there_stack_mapped_ = false;
@@ -2338,13 +2346,13 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.reg = TargetRefReg(kArg0);
+ rl_src.reg = TargetReg(kArg0, kRef);
rl_src.home = false;
MarkLive(rl_src);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
+ StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
}

if (cu_->num_ins == 0) {
@@ -2502,7 +2510,8 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// The rest can be copied together
int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
- int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped, cu_->instruction_set);
+ int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
+                                                cu_->instruction_set);
int current_src_offset = start_offset;
int current_dest_offset = outs_offset;
@@ -2598,7 +2607,7 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// Instead of allocating a new temp, simply reuse one of the registers being used
// for argument passing.
- RegStorage temp = TargetReg(kArg3, false);
+ RegStorage temp = TargetReg(kArg3, kNotWide);
// Now load the argument VR and store to the outs.
Load32Disp(rs_rX86_SP, current_src_offset, temp);
@@ -2614,8 +2623,8 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
// Now handle rest not registers if they are
if (in_to_reg_storage_mapping.IsThereStackMapped()) {
- RegStorage regSingle = TargetReg(kArg2, false);
- RegStorage regWide = TargetReg(kArg3, true);
+ RegStorage regSingle = TargetReg(kArg2, kNotWide);
+ RegStorage regWide = TargetReg(kArg3, kWide);
for (int i = start_index; i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
RegLocation rl_arg = info->args[i];
@@ -2674,13 +2683,13 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
direct_code, direct_method, type);
if (pcrLabel) {
if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
- *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
+ *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
} else {
*pcrLabel = nullptr;
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
RegStorage tmp = AllocTemp();
- Load32Disp(TargetRefReg(kArg1), 0, tmp);
+ Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
MarkPossibleNullPointerException(info->opt_flags);
FreeTemp(tmp);
}