author     buzbee <buzbee@google.com>   2014-02-20 11:48:04 -0800
committer  buzbee <buzbee@google.com>   2014-02-26 10:24:52 -0800
commit     2c1ed456dcdb027d097825dd98dbe48c71599b6c
tree       280b229d3495c8f7cccd23540144cd142a4dd068
parent     504e6997501aa19c7e7973e70f187314af95602c
Rework Quick compiler's register handling
For historical reasons, the Quick backend found it convenient
to consider all 64-bit Dalvik values held in registers
to be contained in a pair of 32-bit registers. Though this
worked well for ARM (where a double-precision register can also be
treated as a pair of 32-bit single-precision registers), it doesn't
play well with other targets, and it is somewhat problematic
for 64-bit architectures.
This is the first of several CLs that will rework the way the
Quick backend deals with physical registers. The goal is to
eliminate the "64-bit value backed with 32-bit register pair"
requirement from the target-independent portions of the backend
and support 64-bit registers throughout.
The key RegLocation struct, which describes the location of
Dalvik virtual registers and register pairs, previously contained
fields for high and low physical registers. The low_reg and
high_reg fields are being replaced with a new type: RegStorage.
There will be a single instance of RegStorage for each RegLocation.
Note that RegStorage does not increase the space used. It is
16 bits wide, the same as the sum of the 8-bit low_reg and
high_reg fields.
At a target-independent level, it will describe whether the physical
register storage associated with the Dalvik value is a single 32-bit
register, a single 64-bit register, a pair of 32-bit registers, or a
vector register. The actual register number encoding is left to the
target-dependent code layer.
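
Concretely, the first hunk in mir_graph.h below swaps the two 8-bit
fields for a single RegStorage:

  -  uint8_t low_reg;   // First physical register.
  -  uint8_t high_reg;  // 2nd physical register (if wide).
  +  RegStorage reg;    // Encoded physical registers.

As a rough illustration of the new type, here is a minimal sketch of
what a 16-bit RegStorage could look like, inferred from the constructor
and accessor calls in this diff (RegStorage(RegStorage::k32BitSolo, r0),
reg.GetReg(), reg.GetHighReg(), reg.SetHighReg(), ...). The shape names
appear in the diff; the bit layout, masks, and shifts are illustrative
assumptions, not the actual contents of the new reg_storage.h:

  #include <cstdint>

  // Sketch only -- the real reg_storage.h may pack the bits differently
  // and encodes additional shapes (e.g. vectors).
  class RegStorage {
   public:
    enum Shape : uint16_t {
      kInvalid   = 0x0000,
      k32BitSolo = 0x4000,  // One 32-bit register.
      k64BitSolo = 0x8000,  // One 64-bit register.
      k64BitPair = 0xc000,  // A pair of 32-bit registers.
    };

    RegStorage() : bits_(kInvalid) {}
    explicit RegStorage(Shape shape) : bits_(shape) {}
    RegStorage(Shape shape, int reg)
        : bits_(static_cast<uint16_t>(shape | (reg & kRegMask))) {}
    RegStorage(Shape shape, int low_reg, int high_reg)
        : bits_(static_cast<uint16_t>(shape | ((high_reg & kRegMask) << kHighShift) |
                                      (low_reg & kRegMask))) {}

    // Target-dependent register number encodings, packed into one value.
    int GetReg() const { return bits_ & kRegMask; }
    int GetHighReg() const { return (bits_ >> kHighShift) & kRegMask; }
    void SetReg(int reg) {
      bits_ = static_cast<uint16_t>((bits_ & ~kRegMask) | (reg & kRegMask));
    }
    void SetHighReg(int reg) {
      bits_ = static_cast<uint16_t>((bits_ & ~(kRegMask << kHighShift)) |
                                    ((reg & kRegMask) << kHighShift));
    }

   private:
    static constexpr int kRegMask = 0x7f;  // 7-bit fields; cf. the new INVALID_REG (0x7F).
    static constexpr int kHighShift = 7;
    uint16_t bits_;  // Same 16 bits as the old 8-bit low_reg + high_reg pair.
  };

With the new type, the ARM return-value templates in arm_lir.h (below)
turn from macros that packed register pairs into ordinary constants, e.g.:

  const RegLocation arm_loc_c_return_wide
      {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
       RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};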
Because physical register handling is pervasive throughout the
backend, this restructuring necessarily involves large CLs with
lots of changes. I'm going to roll these out in stages, and
attempt to segregate the CLs with largely mechanical changes from
those which restructure or rework the logic.
This CL is of the mechanical-change variety: it removes low_reg
and high_reg from RegLocation and introduces RegStorage. It also
includes a lot of new code (such as many calls to GetReg())
that should go away in upcoming CLs.
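
Most of the diff below is exactly that kind of substitution; for
example, in the ARM code:

  // Before:
  OpRegReg(kOpCmp, r_key, rl_src.low_reg);
  LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);

  // After:
  OpRegReg(kOpCmp, r_key, rl_src.reg.GetReg());
  LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);

The GetReg()/GetHighReg() calls are the interim scaffolding mentioned
above; they should largely disappear once the utilities themselves
take RegStorage.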
The tentative plan for the subsequent CLs is:
o Rework standard register utilities such as AllocReg() and
FreeReg() to use RegStorage instead of ints.
o Rework the target-independent GenXXX, OpXXX, LoadValue,
StoreValue, etc. routines to take RegStorage rather than
int register encodings.
o Take advantage of the vector representation and eliminate
the current vector field in RegLocation.
o Replace the "wide" variants of codegen utilities that take
low_reg/high_reg pairs with versions that use RegStorage (this CL
already does so for AllocTypedTempPair(); see the excerpt after
this list).
o Add target-independent codegen utilities for 64-bit registers
where possible, and where that isn't possible, virtualize with
32-bit general register and 64-bit general register variants in
the target-dependent layer.
o Expand/rework the LIR def/use flags to allow for more registers
(currently, we lose out on 16 MIPS floating point regs as
well as ARM's D16..D31 for lack of space in the masks).
o [Possibly] move the float/non-float determination of a register
from the target-dependent encoding to RegStorage. In other
words, replace IsFpReg(register_encoding_bits).
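
Some of that direction is already visible in this CL. ARM's
AllocTypedTempPair(), which hand-packed a register pair into an int,
becomes AllocTypedTempWide() returning a RegStorage (codegen_arm.h
and target_arm.cc below):

  // codegen_arm.h
  -  int AllocTypedTempPair(bool fp_hint, int reg_class);
  +  RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);

  // target_arm.cc -- no more manual packing of the pair into an int:
  -  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
  -  return res;
  +  return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);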
At the end of the day, all code in the target-independent layer
should be using RegStorage, as should much of the target-dependent
layer. Ideally, we won't be using the physical register number
encoding extracted from RegStorage (i.e., GetReg()) until the
NewLIRx() layer.
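
The reworked GenArithOpFloat() in fp_arm.cc (below) is already close
to that shape: the RegLocations carry RegStorage all the way down, and
the raw encodings are extracted only at the NewLIR3() call:

  rl_src1 = LoadValue(rl_src1, kFPReg);
  rl_src2 = LoadValue(rl_src2, kFPReg);
  rl_result = EvalLoc(rl_dest, kFPReg, true);
  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  StoreValue(rl_dest, rl_result);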
Change-Id: Idc5c741478f720bdd1d7123b94e4288be5ce52cb
29 files changed, 1122 insertions, 980 deletions
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h index 2174f679bf..ee602b0290 100644 --- a/compiler/dex/mir_graph.h +++ b/compiler/dex/mir_graph.h @@ -22,6 +22,7 @@ #include "compiler_ir.h" #include "arena_bit_vector.h" #include "utils/growable_array.h" +#include "reg_storage.h" namespace art { @@ -165,7 +166,7 @@ enum OatMethodAttributes { #define INVALID_SREG (-1) #define INVALID_VREG (0xFFFFU) -#define INVALID_REG (0xFF) +#define INVALID_REG (0x7F) #define INVALID_OFFSET (0xDEADF00FU) #define MIR_IGNORE_NULL_CHECK (1 << kMIRIgnoreNullCheck) @@ -328,9 +329,8 @@ struct RegLocation { unsigned ref:1; // Something GC cares about. unsigned high_word:1; // High word of pair? unsigned home:1; // Does this represent the home location? - VectorLengthType vec_len:3; // Is this value in a vector register, and how big is it? - uint8_t low_reg; // First physical register. - uint8_t high_reg; // 2nd physical register (if wide). + VectorLengthType vec_len:3; // TODO: remove. Is this value in a vector register, and how big is it? + RegStorage reg; // Encoded physical registers. int16_t s_reg_low; // SSA name for low Dalvik word. int16_t orig_sreg; // TODO: remove after Bitcode gen complete // and consolidate usage w/ s_reg_low. @@ -361,7 +361,7 @@ struct CallInfo { const RegLocation bad_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed, - INVALID_REG, INVALID_REG, INVALID_SREG, INVALID_SREG}; + RegStorage(RegStorage::kInvalid), INVALID_SREG, INVALID_SREG}; class MIRGraph { public: diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc index 67d476929e..14d03a518c 100644 --- a/compiler/dex/mir_optimization.cc +++ b/compiler/dex/mir_optimization.cc @@ -207,10 +207,11 @@ size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() { } } + +// FIXME - will probably need to revisit all uses of this, as type not defined. static const RegLocation temp_loc = {kLocCompilerTemp, - 0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/, - kVectorNotUsed, INVALID_REG, INVALID_REG, INVALID_SREG, - INVALID_SREG}; + 0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/, kVectorNotUsed, + RegStorage(), INVALID_SREG, INVALID_SREG}; CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) { // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded. diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h index 37b4ec6dc7..bb0335234c 100644 --- a/compiler/dex/quick/arm/arm_lir.h +++ b/compiler/dex/quick/arm/arm_lir.h @@ -117,14 +117,6 @@ namespace art { // Mask to strip off fp flags. #define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1) -// RegisterLocation templates return values (r0, or r0/r1). -#define ARM_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, INVALID_REG, \ - INVALID_SREG, INVALID_SREG} -#define ARM_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1, \ - INVALID_SREG, INVALID_SREG} -#define ARM_LOC_C_RETURN_FLOAT ARM_LOC_C_RETURN -#define ARM_LOC_C_RETURN_DOUBLE ARM_LOC_C_RETURN_WIDE - enum ArmResourceEncodingPos { kArmGPReg0 = 0, kArmRegSP = 13, @@ -225,6 +217,20 @@ enum ArmNativeRegisterPool { #define rARM_INVOKE_TGT rARM_LR #define rARM_COUNT INVALID_REG +// RegisterLocation templates return values (r0, or r0/r1). 
+const RegLocation arm_loc_c_return + {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, + RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG}; +const RegLocation arm_loc_c_return_wide + {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, + RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG}; +const RegLocation arm_loc_c_return_float + {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, + RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG}; +const RegLocation arm_loc_c_return_double + {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, + RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG}; + enum ArmShiftEncodings { kArmLsl = 0x0, kArmLsr = 0x1, diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index b36dde98b2..264bdedf86 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -79,7 +79,7 @@ void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset, LIR* target = NewLIR0(kPseudoTargetLabel); // Load next key/disp NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp)); - OpRegReg(kOpCmp, r_key, rl_src.low_reg); + OpRegReg(kOpCmp, r_key, rl_src.reg.GetReg()); // Go if match. NOTE: No instruction set switch here - must stay Thumb2 OpIT(kCondEq, ""); LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp); @@ -115,10 +115,10 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset, int keyReg; // Remove the bias, if necessary if (low_key == 0) { - keyReg = rl_src.low_reg; + keyReg = rl_src.reg.GetReg(); } else { keyReg = AllocTemp(); - OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key); + OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key); } // Bounds check - if < 0 or >= size continue following switch OpRegImm(kOpCmp, keyReg, size-1); @@ -293,7 +293,7 @@ void ArmMir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int reset_reg = AllocTemp(); - LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg); + LoadWordDisp(rARM_SELF, ex_offset, rl_result.reg.GetReg()); LoadConstant(reset_reg, 0); StoreWordDisp(rARM_SELF, ex_offset, reset_reg); FreeTemp(reset_reg); diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h index 65dee807a1..2c0cead6ca 100644 --- a/compiler/dex/quick/arm/codegen_arm.h +++ b/compiler/dex/quick/arm/codegen_arm.h @@ -49,7 +49,7 @@ class ArmMir2Lir : public Mir2Lir { bool IsFpReg(int reg); bool SameRegType(int reg1, int reg2); int AllocTypedTemp(bool fp_hint, int reg_class); - int AllocTypedTempPair(bool fp_hint, int reg_class); + RegStorage AllocTypedTempWide(bool fp_hint, int reg_class); int S2d(int low_reg, int high_reg); int TargetReg(SpecialTargetRegister reg); int GetArgMappingToPhysicalReg(int arg_num); diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc index 46542e118c..dd0a429a85 100644 --- a/compiler/dex/quick/arm/fp_arm.cc +++ b/compiler/dex/quick/arm/fp_arm.cc @@ -63,7 +63,7 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, rl_src1 = LoadValue(rl_src1, kFPReg); rl_src2 = LoadValue(rl_src2, kFPReg); rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg); + NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); StoreValue(rl_dest, rl_result); } @@ -111,8 +111,8 @@ void 
ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode, rl_result = EvalLoc(rl_dest, kFPReg, true); DCHECK(rl_dest.wide); DCHECK(rl_result.wide); - NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg), - S2d(rl_src2.low_reg, rl_src2.high_reg)); + NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()), + S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg())); StoreValueWide(rl_dest, rl_result); } @@ -143,16 +143,16 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, break; case Instruction::LONG_TO_DOUBLE: { rl_src = LoadValueWide(rl_src, kFPReg); - src_reg = S2d(rl_src.low_reg, rl_src.high_reg); + src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); rl_result = EvalLoc(rl_dest, kFPReg, true); // TODO: clean up AllocTempDouble so that its result has the double bits set. int tmp1 = AllocTempDouble(); int tmp2 = AllocTempDouble(); NewLIR2(kThumb2VcvtF64S32, tmp1 | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1); - NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.low_reg, rl_result.high_reg), (src_reg & ~ARM_FP_DOUBLE)); + NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), (src_reg & ~ARM_FP_DOUBLE)); LoadConstantWide(tmp2, tmp2 + 1, 0x41f0000000000000LL); - NewLIR3(kThumb2VmlaF64, S2d(rl_result.low_reg, rl_result.high_reg), tmp1 | ARM_FP_DOUBLE, + NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), tmp1 | ARM_FP_DOUBLE, tmp2 | ARM_FP_DOUBLE); FreeTemp(tmp1); FreeTemp(tmp2); @@ -173,18 +173,18 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, } if (rl_src.wide) { rl_src = LoadValueWide(rl_src, kFPReg); - src_reg = S2d(rl_src.low_reg, rl_src.high_reg); + src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); } else { rl_src = LoadValue(rl_src, kFPReg); - src_reg = rl_src.low_reg; + src_reg = rl_src.reg.GetReg(); } if (rl_dest.wide) { rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg); + NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg); StoreValueWide(rl_dest, rl_result); } else { rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR2(op, rl_result.low_reg, src_reg); + NewLIR2(op, rl_result.reg.GetReg(), src_reg); StoreValue(rl_dest, rl_result); } } @@ -199,14 +199,14 @@ void ArmMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, rl_src2 = mir_graph_->GetSrcWide(mir, 2); rl_src1 = LoadValueWide(rl_src1, kFPReg); rl_src2 = LoadValueWide(rl_src2, kFPReg); - NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg), - S2d(rl_src2.low_reg, rl_src2.high_reg)); + NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()), + S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg())); } else { rl_src1 = mir_graph_->GetSrc(mir, 0); rl_src2 = mir_graph_->GetSrc(mir, 1); rl_src1 = LoadValue(rl_src1, kFPReg); rl_src2 = LoadValue(rl_src2, kFPReg); - NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg); + NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); } NewLIR0(kThumb2Fmstat); ConditionCode ccode = mir->meta.ccode; @@ -273,28 +273,28 @@ void ArmMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc() ClobberSReg(rl_dest.s_reg_low); rl_result = EvalLoc(rl_dest, kCoreReg, true); - LoadConstant(rl_result.low_reg, default_result); - NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, 
rl_src2.high_reg), - S2d(rl_src2.low_reg, rl_src2.high_reg)); + LoadConstant(rl_result.reg.GetReg(), default_result); + NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()), + S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg())); } else { rl_src1 = LoadValue(rl_src1, kFPReg); rl_src2 = LoadValue(rl_src2, kFPReg); // In case result vreg is also a srcvreg, break association to avoid useless copy by EvalLoc() ClobberSReg(rl_dest.s_reg_low); rl_result = EvalLoc(rl_dest, kCoreReg, true); - LoadConstant(rl_result.low_reg, default_result); - NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg); + LoadConstant(rl_result.reg.GetReg(), default_result); + NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); } - DCHECK(!ARM_FPREG(rl_result.low_reg)); + DCHECK(!ARM_FPREG(rl_result.reg.GetReg())); NewLIR0(kThumb2Fmstat); OpIT((default_result == -1) ? kCondGt : kCondMi, ""); - NewLIR2(kThumb2MovI8M, rl_result.low_reg, + NewLIR2(kThumb2MovI8M, rl_result.reg.GetReg(), ModifiedImmediate(-default_result)); // Must not alter ccodes GenBarrier(); OpIT(kCondEq, ""); - LoadConstant(rl_result.low_reg, 0); + LoadConstant(rl_result.reg.GetReg(), 0); GenBarrier(); StoreValue(rl_dest, rl_result); @@ -304,7 +304,7 @@ void ArmMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValue(rl_src, kFPReg); rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR2(kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg); + NewLIR2(kThumb2Vnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg()); StoreValue(rl_dest, rl_result); } @@ -312,8 +312,8 @@ void ArmMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValueWide(rl_src, kFPReg); rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR2(kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg), - S2d(rl_src.low_reg, rl_src.high_reg)); + NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), + S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg())); StoreValueWide(rl_dest, rl_result); } @@ -324,18 +324,18 @@ bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) { RegLocation rl_dest = InlineTargetWide(info); // double place for result rl_src = LoadValueWide(rl_src, kFPReg); RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR2(kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg), - S2d(rl_src.low_reg, rl_src.high_reg)); - NewLIR2(kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg), - S2d(rl_result.low_reg, rl_result.high_reg)); + NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), + S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg())); + NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), + S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg())); NewLIR0(kThumb2Fmstat); branch = NewLIR2(kThumbBCond, 0, kArmCondEq); ClobberCallerSave(); LockCallTemps(); // Using fixed registers int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt)); - NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg)); + NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg())); NewLIR1(kThumbBlxR, r_tgt); - NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1); + NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), r0, r1); branch->target = NewLIR0(kPseudoTargetLabel); StoreValueWide(rl_dest, rl_result); return true; diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index 
43928fc5e2..7aff89e8b4 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -90,10 +90,10 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, rl_src2 = LoadValueWide(rl_src2, kCoreReg); int t_reg = AllocTemp(); LoadConstant(t_reg, -1); - OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg); + OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg()); LIR* branch1 = OpCondBranch(kCondLt, NULL); LIR* branch2 = OpCondBranch(kCondGt, NULL); - OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg); + OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); LIR* branch3 = OpCondBranch(kCondEq, NULL); OpIT(kCondHi, "E"); @@ -107,7 +107,7 @@ void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, target1 = NewLIR0(kPseudoTargetLabel); RegLocation rl_temp = LocCReturn(); // Just using as template, will change - rl_temp.low_reg = t_reg; + rl_temp.reg.SetReg(t_reg); StoreValue(rl_dest, rl_temp); FreeTemp(t_reg); @@ -125,8 +125,8 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1, LIR* taken = &block_label_list_[bb->taken]; LIR* not_taken = &block_label_list_[bb->fall_through]; rl_src1 = LoadValueWide(rl_src1, kCoreReg); - int32_t low_reg = rl_src1.low_reg; - int32_t high_reg = rl_src1.high_reg; + int32_t low_reg = rl_src1.reg.GetReg(); + int32_t high_reg = rl_src1.reg.GetHighReg(); if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) { int t_reg = AllocTemp(); @@ -178,15 +178,15 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { int false_val = mir->dalvikInsn.vC; rl_result = EvalLoc(rl_dest, kCoreReg, true); if ((true_val == 1) && (false_val == 0)) { - OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1); + OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 1); OpIT(kCondUlt, ""); - LoadConstant(rl_result.low_reg, 0); + LoadConstant(rl_result.reg.GetReg(), 0); GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) { - OpRegImm(kOpCmp, rl_src.low_reg, 0); + OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0); OpIT(kCondEq, "E"); - LoadConstant(rl_result.low_reg, true_val); - LoadConstant(rl_result.low_reg, false_val); + LoadConstant(rl_result.reg.GetReg(), true_val); + LoadConstant(rl_result.reg.GetReg(), false_val); GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact } else { // Unlikely case - could be tuned. @@ -194,10 +194,10 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { int t_reg2 = AllocTemp(); LoadConstant(t_reg1, true_val); LoadConstant(t_reg2, false_val); - OpRegImm(kOpCmp, rl_src.low_reg, 0); + OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0); OpIT(kCondEq, "E"); - OpRegCopy(rl_result.low_reg, t_reg1); - OpRegCopy(rl_result.low_reg, t_reg2); + OpRegCopy(rl_result.reg.GetReg(), t_reg1); + OpRegCopy(rl_result.reg.GetReg(), t_reg2); GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact } } else { @@ -207,17 +207,17 @@ void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) { rl_true = LoadValue(rl_true, kCoreReg); rl_false = LoadValue(rl_false, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegImm(kOpCmp, rl_src.low_reg, 0); - if (rl_result.low_reg == rl_true.low_reg) { // Is the "true" case already in place? + OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0); + if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) { // Is the "true" case already in place? 
OpIT(kCondNe, ""); - OpRegCopy(rl_result.low_reg, rl_false.low_reg); - } else if (rl_result.low_reg == rl_false.low_reg) { // False case in place? + OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg()); + } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) { // False case in place? OpIT(kCondEq, ""); - OpRegCopy(rl_result.low_reg, rl_true.low_reg); + OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg()); } else { // Normal - select between the two. OpIT(kCondEq, "E"); - OpRegCopy(rl_result.low_reg, rl_true.low_reg); - OpRegCopy(rl_result.low_reg, rl_false.low_reg); + OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg()); + OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg()); } GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact } @@ -247,7 +247,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { LIR* not_taken = &block_label_list_[bb->fall_through]; rl_src1 = LoadValueWide(rl_src1, kCoreReg); rl_src2 = LoadValueWide(rl_src2, kCoreReg); - OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg); + OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg()); switch (ccode) { case kCondEq: OpCondBranch(kCondNe, not_taken); @@ -278,7 +278,7 @@ void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) { default: LOG(FATAL) << "Unexpected ccode: " << ccode; } - OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg); + OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); OpCondBranch(ccode, taken); } @@ -415,21 +415,21 @@ bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int r_hi = AllocTemp(); int r_lo = AllocTemp(); - NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg); + NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.reg.GetReg()); switch (pattern) { case Divide3: - OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi, - rl_src.low_reg, EncodeShift(kArmAsr, 31)); + OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi, + rl_src.reg.GetReg(), EncodeShift(kArmAsr, 31)); break; case Divide5: - OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31); - OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi, + OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31); + OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi, EncodeShift(kArmAsr, magic_table[lit].shift)); break; case Divide7: - OpRegReg(kOpAdd, r_hi, rl_src.low_reg); - OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31); - OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi, + OpRegReg(kOpAdd, r_hi, rl_src.reg.GetReg()); + OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31); + OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi, EncodeShift(kArmAsr, magic_table[lit].shift)); break; default: @@ -476,7 +476,7 @@ RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (is_div) { // Simple case, use sdiv instruction. 
- OpRegRegReg(kOpDiv, rl_result.low_reg, reg1, reg2); + OpRegRegReg(kOpDiv, rl_result.reg.GetReg(), reg1, reg2); } else { // Remainder case, use the following code: // temp = reg1 / reg2 - integer division @@ -486,7 +486,7 @@ RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, int temp = AllocTemp(); OpRegRegReg(kOpDiv, temp, reg1, reg2); OpRegReg(kOpMul, temp, reg2); - OpRegRegReg(kOpSub, rl_result.low_reg, reg1, temp); + OpRegRegReg(kOpSub, rl_result.reg.GetReg(), reg1, temp); FreeTemp(temp); } @@ -501,10 +501,10 @@ bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) { rl_src2 = LoadValue(rl_src2, kCoreReg); RegLocation rl_dest = InlineTarget(info); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg); + OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); OpIT((is_min) ? kCondGt : kCondLt, "E"); - OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg); - OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg); + OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src2.reg.GetReg()); + OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src1.reg.GetReg()); GenBarrier(); StoreValue(rl_dest, rl_result); return true; @@ -518,18 +518,18 @@ bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (size == kLong) { // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0. - if (rl_address.low_reg != rl_result.low_reg) { - LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG); - LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG); + if (rl_address.reg.GetReg() != rl_result.reg.GetReg()) { + LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG); + LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG); } else { - LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG); - LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG); + LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG); + LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG); } StoreValueWide(rl_dest, rl_result); } else { DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord); // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0. - LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG); + LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG); StoreValue(rl_dest, rl_result); } return true; @@ -543,13 +543,13 @@ bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) { if (size == kLong) { // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0. RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg); - StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, kWord); - StoreBaseDisp(rl_address.low_reg, 4, rl_value.high_reg, kWord); + StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), kWord); + StoreBaseDisp(rl_address.reg.GetReg(), 4, rl_value.reg.GetHighReg(), kWord); } else { DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord); // Unaligned store with STR and STRSH is allowed on ARMv7 with SCTLR.A set to 0. 
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg); - StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size); + StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size); } return true; } @@ -589,24 +589,24 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) { bool load_early = true; if (is_long) { bool expected_is_core_reg = - rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.low_reg); + rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.reg.GetReg()); bool new_value_is_core_reg = - rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.low_reg); - bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.low_reg); - bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.low_reg); + rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.reg.GetReg()); + bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.reg.GetReg()); + bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.reg.GetReg()); if (!expected_is_good_reg && !new_value_is_good_reg) { // None of expected/new_value is non-temp reg, need to load both late load_early = false; // Make sure they are not in the temp regs and the load will not be skipped. if (expected_is_core_reg) { - FlushRegWide(rl_src_expected.low_reg, rl_src_expected.high_reg); + FlushRegWide(rl_src_expected.reg.GetReg(), rl_src_expected.reg.GetHighReg()); ClobberSReg(rl_src_expected.s_reg_low); ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low)); rl_src_expected.location = kLocDalvikFrame; } if (new_value_is_core_reg) { - FlushRegWide(rl_src_new_value.low_reg, rl_src_new_value.high_reg); + FlushRegWide(rl_src_new_value.reg.GetReg(), rl_src_new_value.reg.GetHighReg()); ClobberSReg(rl_src_new_value.s_reg_low); ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low)); rl_src_new_value.location = kLocDalvikFrame; @@ -627,19 +627,19 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) { if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) { // Mark card for object assuming new value is stored. - MarkGCCard(rl_new_value.low_reg, rl_object.low_reg); + MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg()); } RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg); int r_ptr = rARM_LR; - OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg); + OpRegRegReg(kOpAdd, r_ptr, rl_object.reg.GetReg(), rl_offset.reg.GetReg()); // Free now unneeded rl_object and rl_offset to give more temps. ClobberSReg(rl_object.s_reg_low); - FreeTemp(rl_object.low_reg); + FreeTemp(rl_object.reg.GetReg()); ClobberSReg(rl_offset.s_reg_low); - FreeTemp(rl_offset.low_reg); + FreeTemp(rl_offset.reg.GetReg()); RegLocation rl_expected; if (!is_long) { @@ -647,8 +647,11 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) { } else if (load_early) { rl_expected = LoadValueWide(rl_src_expected, kCoreReg); } else { - rl_new_value.low_reg = rl_expected.low_reg = AllocTemp(); - rl_new_value.high_reg = rl_expected.high_reg = AllocTemp(); + // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs. 
+ int low_reg = AllocTemp(); + int high_reg = AllocTemp(); + rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg); + rl_expected = rl_new_value; } // do { @@ -662,13 +665,13 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) { if (is_long) { int r_tmp_high = AllocTemp(); if (!load_early) { - LoadValueDirectWide(rl_src_expected, rl_expected.low_reg, rl_expected.high_reg); + LoadValueDirectWide(rl_src_expected, rl_expected.reg.GetReg(), rl_expected.reg.GetHighReg()); } NewLIR3(kThumb2Ldrexd, r_tmp, r_tmp_high, r_ptr); - OpRegReg(kOpSub, r_tmp, rl_expected.low_reg); - OpRegReg(kOpSub, r_tmp_high, rl_expected.high_reg); + OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg()); + OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHighReg()); if (!load_early) { - LoadValueDirectWide(rl_src_new_value, rl_new_value.low_reg, rl_new_value.high_reg); + LoadValueDirectWide(rl_src_new_value, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg()); } // Make sure we use ORR that sets the ccode if (ARM_LOWREG(r_tmp) && ARM_LOWREG(r_tmp_high)) { @@ -680,14 +683,14 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) { DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE); OpIT(kCondEq, "T"); - NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.low_reg, rl_new_value.high_reg, r_ptr); + NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg(), r_ptr); } else { NewLIR3(kThumb2Ldrex, r_tmp, r_ptr, 0); - OpRegReg(kOpSub, r_tmp, rl_expected.low_reg); + OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg()); DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE); OpIT(kCondEq, "T"); - NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.low_reg, r_ptr, 0); + NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.reg.GetReg(), r_ptr, 0); } // Still one conditional left from OpIT(kCondEq, "T") from either branch @@ -695,16 +698,16 @@ bool ArmMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) { OpCondBranch(kCondEq, target); if (!load_early) { - FreeTemp(rl_expected.low_reg); // Now unneeded. - FreeTemp(rl_expected.high_reg); // Now unneeded. + FreeTemp(rl_expected.reg.GetReg()); // Now unneeded. + FreeTemp(rl_expected.reg.GetHighReg()); // Now unneeded. } // result := (tmp1 != 0) ? 0 : 1; RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegImm(kOpRsub, rl_result.low_reg, r_tmp, 1); + OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), r_tmp, 1); DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE); OpIT(kCondUlt, ""); - LoadConstant(rl_result.low_reg, 0); /* cc */ + LoadConstant(rl_result.reg.GetReg(), 0); /* cc */ FreeTemp(r_tmp); // Now unneeded. 
StoreValue(rl_dest, rl_result); @@ -730,10 +733,10 @@ LIR* ArmMir2Lir::OpVstm(int rBase, int count) { void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit, int first_bit, int second_bit) { - OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg, + OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(), EncodeShift(kArmLsl, second_bit - first_bit)); if (first_bit != 0) { - OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit); + OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit); } } @@ -782,14 +785,14 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { int z_reg = AllocTemp(); LoadConstantNoClobber(z_reg, 0); // Check for destructive overlap - if (rl_result.low_reg == rl_src.high_reg) { + if (rl_result.reg.GetReg() == rl_src.reg.GetHighReg()) { int t_reg = AllocTemp(); - OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg); - OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg); + OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg()); + OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, t_reg); FreeTemp(t_reg); } else { - OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg); - OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg); + OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg()); + OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, rl_src.reg.GetHighReg()); } FreeTemp(z_reg); StoreValueWide(rl_dest, rl_result); @@ -827,41 +830,41 @@ void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest, bool special_case = true; // If operands are the same, or any pair has been promoted we're not the special case. if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || - (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) || - (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) { + (!IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg())) || + (!IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg()))) { special_case = false; } // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly. 
int res_lo = AllocTemp(); int res_hi; - if (rl_src1.low_reg == rl_src2.low_reg) { + if (rl_src1.reg.GetReg() == rl_src2.reg.GetReg()) { res_hi = AllocTemp(); - NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg); - NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg); + NewLIR3(kThumb2MulRRR, tmp1, rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()); + NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.reg.GetReg(), rl_src1.reg.GetReg()); OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1)); } else { // In the special case, all temps are now allocated - NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg); + NewLIR3(kThumb2MulRRR, tmp1, rl_src2.reg.GetReg(), rl_src1.reg.GetHighReg()); if (special_case) { - DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg); - DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg); - FreeTemp(rl_src1.high_reg); + DCHECK_NE(rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); + DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg()); + FreeTemp(rl_src1.reg.GetHighReg()); } res_hi = AllocTemp(); - NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg); - NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1); + NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.reg.GetReg(), rl_src1.reg.GetReg()); + NewLIR4(kThumb2Mla, tmp1, rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg(), tmp1); NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0); if (special_case) { - FreeTemp(rl_src1.low_reg); - Clobber(rl_src1.low_reg); - Clobber(rl_src1.high_reg); + FreeTemp(rl_src1.reg.GetReg()); + Clobber(rl_src1.reg.GetReg()); + Clobber(rl_src1.reg.GetHighReg()); } } FreeTemp(tmp1); rl_result = GetReturnWide(false); // Just using as a template. - rl_result.low_reg = res_lo; - rl_result.high_reg = res_hi; + rl_result.reg.SetReg(res_lo); + rl_result.reg.SetHighReg(res_hi); StoreValueWide(rl_dest, rl_result); // Now, restore lr to its non-temp status. Clobber(rARM_LR); @@ -920,25 +923,25 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, } /* null object? */ - GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags); + GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags); bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK)); int reg_len = INVALID_REG; if (needs_range_check) { reg_len = AllocTemp(); /* Get len */ - LoadWordDisp(rl_array.low_reg, len_offset, reg_len); + LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len); } if (rl_dest.wide || rl_dest.fp || constant_index) { int reg_ptr; if (constant_index) { - reg_ptr = rl_array.low_reg; // NOTE: must not alter reg_ptr in constant case. + reg_ptr = rl_array.reg.GetReg(); // NOTE: must not alter reg_ptr in constant case. 
} else { // No special indexed operation, lea + load w/ displacement reg_ptr = AllocTemp(); - OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg, + OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(), EncodeShift(kArmLsl, scale)); - FreeTemp(rl_index.low_reg); + FreeTemp(rl_index.reg.GetReg()); } rl_result = EvalLoc(rl_dest, reg_class, true); @@ -946,18 +949,18 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, if (constant_index) { GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds); } else { - GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds); + GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds); } FreeTemp(reg_len); } if (rl_dest.wide) { - LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG); + LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG); if (!constant_index) { FreeTemp(reg_ptr); } StoreValueWide(rl_dest, rl_result); } else { - LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG); + LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG); if (!constant_index) { FreeTemp(reg_ptr); } @@ -966,15 +969,15 @@ void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array, } else { // Offset base, then use indexed load int reg_ptr = AllocTemp(); - OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset); - FreeTemp(rl_array.low_reg); + OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset); + FreeTemp(rl_array.reg.GetReg()); rl_result = EvalLoc(rl_dest, reg_class, true); if (needs_range_check) { - GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds); + GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds); FreeTemp(reg_len); } - LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size); + LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size); FreeTemp(reg_ptr); StoreValue(rl_dest, rl_result); } @@ -1010,17 +1013,17 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, int reg_ptr; bool allocated_reg_ptr_temp = false; if (constant_index) { - reg_ptr = rl_array.low_reg; - } else if (IsTemp(rl_array.low_reg) && !card_mark) { - Clobber(rl_array.low_reg); - reg_ptr = rl_array.low_reg; + reg_ptr = rl_array.reg.GetReg(); + } else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) { + Clobber(rl_array.reg.GetReg()); + reg_ptr = rl_array.reg.GetReg(); } else { allocated_reg_ptr_temp = true; reg_ptr = AllocTemp(); } /* null object? */ - GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags); + GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags); bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK)); int reg_len = INVALID_REG; @@ -1028,7 +1031,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, reg_len = AllocTemp(); // NOTE: max live temps(4) here. 
/* Get len */ - LoadWordDisp(rl_array.low_reg, len_offset, reg_len); + LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len); } /* at this point, reg_ptr points to array, 2 live temps */ if (rl_src.wide || rl_src.fp || constant_index) { @@ -1038,39 +1041,39 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array, rl_src = LoadValue(rl_src, reg_class); } if (!constant_index) { - OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg, + OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(), EncodeShift(kArmLsl, scale)); } if (needs_range_check) { if (constant_index) { GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds); } else { - GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds); + GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds); } FreeTemp(reg_len); } if (rl_src.wide) { - StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg); + StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); } else { - StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size); + StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size); } } else { /* reg_ptr -> array data */ - OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset); + OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset); rl_src = LoadValue(rl_src, reg_class); if (needs_range_check) { - GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds); + GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds); FreeTemp(reg_len); } - StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg, + StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(), scale, size); } if (allocated_reg_ptr_temp) { FreeTemp(reg_ptr); } if (card_mark) { - MarkGCCard(rl_src.low_reg, rl_array.low_reg); + MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg()); } } @@ -1093,53 +1096,53 @@ void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, case Instruction::SHL_LONG: case Instruction::SHL_LONG_2ADDR: if (shift_amount == 1) { - OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg); - OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg); + OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg()); + OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), rl_src.reg.GetHighReg()); } else if (shift_amount == 32) { - OpRegCopy(rl_result.high_reg, rl_src.low_reg); - LoadConstant(rl_result.low_reg, 0); + OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg()); + LoadConstant(rl_result.reg.GetReg(), 0); } else if (shift_amount > 31) { - OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32); - LoadConstant(rl_result.low_reg, 0); + OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), shift_amount - 32); + LoadConstant(rl_result.reg.GetReg(), 0); } else { - OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount); - OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg, + OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount); + OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), EncodeShift(kArmLsr, 32 - shift_amount)); - OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount); + OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), shift_amount); } 
break; case Instruction::SHR_LONG: case Instruction::SHR_LONG_2ADDR: if (shift_amount == 32) { - OpRegCopy(rl_result.low_reg, rl_src.high_reg); - OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31); + OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg()); + OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31); } else if (shift_amount > 31) { - OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32); - OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31); + OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32); + OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31); } else { int t_reg = AllocTemp(); - OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount); - OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg, + OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount); + OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(), EncodeShift(kArmLsl, 32 - shift_amount)); FreeTemp(t_reg); - OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount); + OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount); } break; case Instruction::USHR_LONG: case Instruction::USHR_LONG_2ADDR: if (shift_amount == 32) { - OpRegCopy(rl_result.low_reg, rl_src.high_reg); - LoadConstant(rl_result.high_reg, 0); + OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg()); + LoadConstant(rl_result.reg.GetHighReg(), 0); } else if (shift_amount > 31) { - OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32); - LoadConstant(rl_result.high_reg, 0); + OpRegRegImm(kOpLsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32); + LoadConstant(rl_result.reg.GetHighReg(), 0); } else { int t_reg = AllocTemp(); - OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount); - OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg, + OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount); + OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(), EncodeShift(kArmLsl, 32 - shift_amount)); FreeTemp(t_reg); - OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount); + OpRegRegImm(kOpLsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount); } break; default: @@ -1194,36 +1197,36 @@ void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode, switch (opcode) { case Instruction::ADD_LONG: case Instruction::ADD_LONG_2ADDR: - NewLIR3(kThumb2AddRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo); - NewLIR3(kThumb2AdcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi); + NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo); + NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi); break; case Instruction::OR_LONG: case Instruction::OR_LONG_2ADDR: - if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) { - OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo); + if ((val_lo != 0) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) { + OpRegRegImm(kOpOr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo); } - if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) { - OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi); + if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) { + OpRegRegImm(kOpOr, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi); } break; case Instruction::XOR_LONG: 
case Instruction::XOR_LONG_2ADDR: - OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo); - OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi); + OpRegRegImm(kOpXor, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo); + OpRegRegImm(kOpXor, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi); break; case Instruction::AND_LONG: case Instruction::AND_LONG_2ADDR: - if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) { - OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo); + if ((val_lo != 0xffffffff) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) { + OpRegRegImm(kOpAnd, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo); } - if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) { - OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi); + if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) { + OpRegRegImm(kOpAnd, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi); } break; case Instruction::SUB_LONG_2ADDR: case Instruction::SUB_LONG: - NewLIR3(kThumb2SubRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo); - NewLIR3(kThumb2SbcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi); + NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo); + NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi); break; default: LOG(FATAL) << "Unexpected opcode " << opcode; diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index 83431ad235..ab1a053489 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -37,23 +37,19 @@ static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15}; RegLocation ArmMir2Lir::LocCReturn() { - RegLocation res = ARM_LOC_C_RETURN; - return res; + return arm_loc_c_return; } RegLocation ArmMir2Lir::LocCReturnWide() { - RegLocation res = ARM_LOC_C_RETURN_WIDE; - return res; + return arm_loc_c_return_wide; } RegLocation ArmMir2Lir::LocCReturnFloat() { - RegLocation res = ARM_LOC_C_RETURN_FLOAT; - return res; + return arm_loc_c_return_float; } RegLocation ArmMir2Lir::LocCReturnDouble() { - RegLocation res = ARM_LOC_C_RETURN_DOUBLE; - return res; + return arm_loc_c_return_double; } // Return a target-dependent special register. @@ -530,14 +526,10 @@ Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, return new ArmMir2Lir(cu, mir_graph, arena); } -/* - * Alloc a pair of core registers, or a double. Low reg in low byte, - * high reg in next byte. - */ -int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) { +// Alloc a pair of core registers, or a double. 
+RegStorage ArmMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) { int high_reg; int low_reg; - int res = 0; if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) { low_reg = AllocTempDouble(); @@ -546,8 +538,7 @@ int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) { low_reg = AllocTemp(); high_reg = AllocTemp(); } - res = (low_reg & 0xff) | ((high_reg & 0xff) << 8); - return res; + return RegStorage(RegStorage::k64BitPair, low_reg, high_reg); } int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) { @@ -594,11 +585,11 @@ void ArmMir2Lir::CompilerInitializeRegAlloc() { void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) { - if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) && - (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) { + if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) && + (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) { // No overlap, free both - FreeTemp(rl_free.low_reg); - FreeTemp(rl_free.high_reg); + FreeTemp(rl_free.reg.GetReg()); + FreeTemp(rl_free.reg.GetHighReg()); } } /* @@ -697,19 +688,19 @@ void ArmMir2Lir::ClobberCallerSave() { RegLocation ArmMir2Lir::GetReturnWideAlt() { RegLocation res = LocCReturnWide(); - res.low_reg = r2; - res.high_reg = r3; + res.reg.SetReg(r2); + res.reg.SetHighReg(r3); Clobber(r2); Clobber(r3); MarkInUse(r2); MarkInUse(r3); - MarkPair(res.low_reg, res.high_reg); + MarkPair(res.reg.GetReg(), res.reg.GetHighReg()); return res; } RegLocation ArmMir2Lir::GetReturnAlt() { RegLocation res = LocCReturn(); - res.low_reg = r1; + res.reg.SetReg(r1); Clobber(r1); MarkInUse(r1); return res; diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 0533fbfcd7..06f2010396 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -128,12 +128,12 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, if ((rl_temp.location == kLocDalvikFrame) && InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) { // OK - convert this to a compare immediate and branch - OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken); + OpCmpImmBranch(cond, rl_src1.reg.GetReg(), mir_graph_->ConstantValue(rl_src2), taken); return; } } rl_src2 = LoadValue(rl_src2, kCoreReg); - OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken); + OpCmpBranch(cond, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), taken); } void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken, @@ -163,17 +163,17 @@ void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_s cond = static_cast<ConditionCode>(0); LOG(FATAL) << "Unexpected opcode " << opcode; } - OpCmpImmBranch(cond, rl_src.low_reg, 0, taken); + OpCmpImmBranch(cond, rl_src.reg.GetReg(), 0, taken); } void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (rl_src.location == kLocPhysReg) { - OpRegCopy(rl_result.low_reg, rl_src.low_reg); + OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg()); } else { - LoadValueDirect(rl_src, rl_result.low_reg); + LoadValueDirect(rl_src, rl_result.reg.GetReg()); } - OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31); + OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), 31); 
StoreValueWide(rl_dest, rl_result); } @@ -195,7 +195,7 @@ void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, default: LOG(ERROR) << "Bad int conversion type"; } - OpRegReg(op, rl_result.low_reg, rl_src.low_reg); + OpRegReg(op, rl_result.reg.GetReg(), rl_src.reg.GetReg()); StoreValue(rl_dest, rl_result); } @@ -290,7 +290,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { RegLocation loc = UpdateLoc(info->args[i]); if (loc.location == kLocPhysReg) { StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), - loc.low_reg, kWord); + loc.reg.GetReg(), kWord); } } /* @@ -341,10 +341,10 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { RegLocation rl_arg = LoadValue(info->args[i], kCoreReg); StoreBaseDisp(TargetReg(kRet0), mirror::Array::DataOffset(component_size).Int32Value() + - i * 4, rl_arg.low_reg, kWord); + i * 4, rl_arg.reg.GetReg(), kWord); // If the LoadValue caused a temp to be allocated, free it - if (IsTemp(rl_arg.low_reg)) { - FreeTemp(rl_arg.low_reg); + if (IsTemp(rl_arg.reg.GetReg())) { + FreeTemp(rl_arg.reg.GetReg()); } } } @@ -398,10 +398,10 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do // Fast path, static storage base is this method's class RegLocation rl_method = LoadCurrMethod(); r_base = AllocTemp(); - LoadWordDisp(rl_method.low_reg, + LoadWordDisp(rl_method.reg.GetReg(), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); - if (IsTemp(rl_method.low_reg)) { - FreeTemp(rl_method.low_reg); + if (IsTemp(rl_method.reg.GetReg())) { + FreeTemp(rl_method.reg.GetReg()); } } else { // Medium path, static storage base in a different class which requires checks that the other @@ -453,16 +453,16 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do GenMemBarrier(kStoreStore); } if (is_long_or_double) { - StoreBaseDispWide(r_base, field_offset, rl_src.low_reg, - rl_src.high_reg); + StoreBaseDispWide(r_base, field_offset, rl_src.reg.GetReg(), + rl_src.reg.GetHighReg()); } else { - StoreWordDisp(r_base, field_offset, rl_src.low_reg); + StoreWordDisp(r_base, field_offset, rl_src.reg.GetReg()); } if (is_volatile) { GenMemBarrier(kStoreLoad); } if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { - MarkGCCard(rl_src.low_reg, r_base); + MarkGCCard(rl_src.reg.GetReg(), r_base); } FreeTemp(r_base); } else { @@ -492,7 +492,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, // Fast path, static storage base is this method's class RegLocation rl_method = LoadCurrMethod(); r_base = AllocTemp(); - LoadWordDisp(rl_method.low_reg, + LoadWordDisp(rl_method.reg.GetReg(), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base); } else { // Medium path, static storage base in a different class which requires checks that the other @@ -539,10 +539,10 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, GenMemBarrier(kLoadLoad); } if (is_long_or_double) { - LoadBaseDispWide(r_base, field_offset, rl_result.low_reg, - rl_result.high_reg, INVALID_SREG); + LoadBaseDispWide(r_base, field_offset, rl_result.reg.GetReg(), + rl_result.reg.GetHighReg(), INVALID_SREG); } else { - LoadWordDisp(r_base, field_offset, rl_result.low_reg); + LoadWordDisp(r_base, field_offset, rl_result.reg.GetReg()); } FreeTemp(r_base); if (is_long_or_double) { @@ -713,20 +713,20 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, rl_obj = LoadValue(rl_obj, kCoreReg); if (is_long_or_double) { DCHECK(rl_dest.wide); - GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, 
opt_flags); + GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags); if (cu_->instruction_set == kX86) { rl_result = EvalLoc(rl_dest, reg_class, true); - GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); - LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg, - rl_result.high_reg, rl_obj.s_reg_low); + GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags); + LoadBaseDispWide(rl_obj.reg.GetReg(), field_offset, rl_result.reg.GetReg(), + rl_result.reg.GetHighReg(), rl_obj.s_reg_low); if (is_volatile) { GenMemBarrier(kLoadLoad); } } else { int reg_ptr = AllocTemp(); - OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset); + OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_offset); rl_result = EvalLoc(rl_dest, reg_class, true); - LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG); + LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG); if (is_volatile) { GenMemBarrier(kLoadLoad); } @@ -735,8 +735,8 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, StoreValueWide(rl_dest, rl_result); } else { rl_result = EvalLoc(rl_dest, reg_class, true); - GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); - LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg, + GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags); + LoadBaseDisp(rl_obj.reg.GetReg(), field_offset, rl_result.reg.GetReg(), kWord, rl_obj.s_reg_low); if (is_volatile) { GenMemBarrier(kLoadLoad); @@ -773,29 +773,29 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, if (is_long_or_double) { int reg_ptr; rl_src = LoadValueWide(rl_src, kAnyReg); - GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); + GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags); reg_ptr = AllocTemp(); - OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset); + OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_offset); if (is_volatile) { GenMemBarrier(kStoreStore); } - StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg); + StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); if (is_volatile) { GenMemBarrier(kLoadLoad); } FreeTemp(reg_ptr); } else { rl_src = LoadValue(rl_src, reg_class); - GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags); + GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags); if (is_volatile) { GenMemBarrier(kStoreStore); } - StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord); + StoreBaseDisp(rl_obj.reg.GetReg(), field_offset, rl_src.reg.GetReg(), kWord); if (is_volatile) { GenMemBarrier(kLoadLoad); } if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) { - MarkGCCard(rl_src.low_reg, rl_obj.low_reg); + MarkGCCard(rl_src.reg.GetReg(), rl_obj.reg.GetReg()); } } } else { @@ -829,23 +829,23 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // Call out to helper which resolves type and verifies access. // Resolved type returned in kRet0. 
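The volatile paths above (GenSget, GenSput, GenIGet, GenIPut) bracket the memory access with GenMemBarrier calls: kLoadLoad after a volatile load, kStoreStore before a volatile store, and a trailing barrier after it. A rough standalone rendering of that ordering in terms of C++ fences, purely to show where the barriers sit relative to the access; the fence strengths are approximations and none of this is ART code.

#include <atomic>
#include <cstdint>

// Fences around a relaxed atomic are used only to mirror the barrier
// positions emitted by the generated code.
int32_t VolatileGetSketch(const std::atomic<int32_t>& field) {
  int32_t value = field.load(std::memory_order_relaxed);  // the field load
  std::atomic_thread_fence(std::memory_order_acquire);    // ~ GenMemBarrier(kLoadLoad)
  return value;
}

void VolatilePutSketch(std::atomic<int32_t>& field, int32_t value) {
  std::atomic_thread_fence(std::memory_order_release);    // ~ GenMemBarrier(kStoreStore)
  field.store(value, std::memory_order_relaxed);          // the field store
  std::atomic_thread_fence(std::memory_order_seq_cst);    // ~ trailing barrier (kStoreLoad in GenSput)
}

int main() {
  std::atomic<int32_t> field{0};
  VolatilePutSketch(field, 42);
  return VolatileGetSketch(field) == 42 ? 0 : 1;
}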
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess), - type_idx, rl_method.low_reg, true); + type_idx, rl_method.reg.GetReg(), true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); } else { // We're don't need access checks, load type from dex cache int32_t dex_cache_offset = mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); - LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg); + LoadWordDisp(rl_method.reg.GetReg(), dex_cache_offset, res_reg); int32_t offset_of_type = mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*) * type_idx); - LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg); + LoadWordDisp(res_reg, offset_of_type, rl_result.reg.GetReg()); if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx) || SLOW_TYPE_PATH) { // Slow path, at runtime test if type is null and if so initialize FlushAllRegs(); - LIR* branch = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL); + LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg.GetReg(), 0, NULL); LIR* cont = NewLIR0(kPseudoTargetLabel); // Object to generate the slow path for class resolution. @@ -861,8 +861,8 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { GenerateTargetLabel(); m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_, - rl_method_.low_reg, true); - m2l_->OpRegCopy(rl_result_.low_reg, m2l_->TargetReg(kRet0)); + rl_method_.reg.GetReg(), true); + m2l_->OpRegCopy(rl_result_.reg.GetReg(), m2l_->TargetReg(kRet0)); m2l_->OpUnconditionalBranch(cont_); } @@ -900,8 +900,8 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { int r_method; if (rl_method.location == kLocPhysReg) { // A temp would conflict with register use below. 
- DCHECK(!IsTemp(rl_method.low_reg)); - r_method = rl_method.low_reg; + DCHECK(!IsTemp(rl_method.reg.GetReg())); + r_method = rl_method.reg.GetReg(); } else { r_method = TargetReg(kArg2); LoadCurrMethodDirect(r_method); @@ -960,9 +960,9 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { RegLocation rl_method = LoadCurrMethod(); int res_reg = AllocTemp(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - LoadWordDisp(rl_method.low_reg, + LoadWordDisp(rl_method.reg.GetReg(), mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg); - LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg); + LoadWordDisp(res_reg, offset_of_string, rl_result.reg.GetReg()); StoreValue(rl_dest, rl_result); } } @@ -1035,12 +1035,12 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re RegLocation object = LoadValue(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - int result_reg = rl_result.low_reg; - if (result_reg == object.low_reg) { + int result_reg = rl_result.reg.GetReg(); + if (result_reg == object.reg.GetReg()) { result_reg = AllocTypedTemp(false, kCoreReg); } LoadConstant(result_reg, 0); // assume false - LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL); + LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL); int check_class = AllocTypedTemp(false, kCoreReg); int object_class = AllocTypedTemp(false, kCoreReg); @@ -1049,11 +1049,11 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re if (use_declaring_class) { LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class); - LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class); + LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class); } else { LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), check_class); - LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class); + LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class); int32_t offset_of_type = mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*) * type_idx); @@ -1077,7 +1077,7 @@ void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, Re FreeTemp(object_class); FreeTemp(check_class); if (IsTemp(result_reg)) { - OpRegCopy(rl_result.low_reg, result_reg); + OpRegCopy(rl_result.reg.GetReg(), result_reg); FreeTemp(result_reg); } StoreValue(rl_dest, rl_result); @@ -1133,7 +1133,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know RegLocation rl_result = GetReturn(false); if (cu_->instruction_set == kMips) { // On MIPS rArg0 != rl_result, place false in result if branch is taken. - LoadConstant(rl_result.low_reg, 0); + LoadConstant(rl_result.reg.GetReg(), 0); } LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL); @@ -1147,12 +1147,12 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (cu_->instruction_set == kThumb2) { OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? 
OpIT(kCondEq, "E"); // if-convert the test - LoadConstant(rl_result.low_reg, 1); // .eq case - load true - LoadConstant(rl_result.low_reg, 0); // .ne case - load false + LoadConstant(rl_result.reg.GetReg(), 1); // .eq case - load true + LoadConstant(rl_result.reg.GetReg(), 0); // .ne case - load false } else { - LoadConstant(rl_result.low_reg, 0); // ne case - load false + LoadConstant(rl_result.reg.GetReg(), 0); // ne case - load false branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL); - LoadConstant(rl_result.low_reg, 1); // eq case - load true + LoadConstant(rl_result.reg.GetReg(), 1); // eq case - load true } } else { if (cu_->instruction_set == kThumb2) { @@ -1169,7 +1169,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know } else { if (!type_known_abstract) { /* Uses branchovers */ - LoadConstant(rl_result.low_reg, 1); // assume true + LoadConstant(rl_result.reg.GetReg(), 1); // assume true branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); } int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial)); @@ -1355,16 +1355,16 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des rl_src2 = LoadValueWide(rl_src2, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); // The longs may overlap - use intermediate temp if so - if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) { + if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) { int t_reg = AllocTemp(); - OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg); - OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg); - OpRegCopy(rl_result.low_reg, t_reg); + OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); + OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg()); + OpRegCopy(rl_result.reg.GetReg(), t_reg); FreeTemp(t_reg); } else { - OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg); - OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, - rl_src2.high_reg); + OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); + OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), + rl_src2.reg.GetHighReg()); } /* * NOTE: If rl_dest refers to a frame variable in a large frame, the @@ -1487,22 +1487,22 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, if (unary) { rl_src1 = LoadValue(rl_src1, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegReg(op, rl_result.low_reg, rl_src1.low_reg); + OpRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg()); } else { if (shift_op) { int t_reg = INVALID_REG; rl_src2 = LoadValue(rl_src2, kCoreReg); t_reg = AllocTemp(); - OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31); + OpRegRegImm(kOpAnd, t_reg, rl_src2.reg.GetReg(), 31); rl_src1 = LoadValue(rl_src1, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg); + OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), t_reg); FreeTemp(t_reg); } else { rl_src1 = LoadValue(rl_src1, kCoreReg); rl_src2 = LoadValue(rl_src2, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg); + OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); } } 
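GenLong3Addr above performs a 64-bit operation as two 32-bit ops on the halves of register pairs, detouring through a temp when the result's low register aliases one of the source high registers. A value-level model of that overlap handling; the register-file array and index numbers are invented for the example, and this is not ART code.

#include <cassert>
#include <cstdint>
#include <cstdio>

// A 64-bit add carried out on 32-bit "registers" (array slots), where the
// result's low slot may alias a source's high slot.  As in GenLong3Addr, the
// low result goes to a temporary first when such overlap exists, so the
// high-half inputs are still intact when the high halves are combined.
void AddLongPairs(uint32_t regs[], int dest_lo, int dest_hi,
                  int src1_lo, int src1_hi, int src2_lo, int src2_hi) {
  bool overlap = (dest_lo == src1_hi) || (dest_lo == src2_hi);
  uint64_t lo_sum = static_cast<uint64_t>(regs[src1_lo]) + regs[src2_lo];
  uint32_t lo = static_cast<uint32_t>(lo_sum);           // low-half sum
  uint32_t carry = static_cast<uint32_t>(lo_sum >> 32);  // what kOpAdc consumes
  if (overlap) {
    uint32_t hi = regs[src1_hi] + regs[src2_hi] + carry;  // high halves first
    regs[dest_hi] = hi;
    regs[dest_lo] = lo;                                   // OpRegCopy from the temp
  } else {
    regs[dest_lo] = lo;                                   // safe: no aliasing
    regs[dest_hi] = regs[src1_hi] + regs[src2_hi] + carry;
  }
}

int main() {
  // regs[0..1]: first operand (lo, hi);  regs[2..3]: second operand (lo, hi).
  uint32_t regs[4] = {0xffffffffu, 0x00000001u, 0x00000001u, 0x00000002u};
  // Destination deliberately aliases: dest_lo is the first operand's high slot.
  AddLongPairs(regs, /*dest_lo=*/1, /*dest_hi=*/0, 0, 1, 2, 3);
  uint64_t result = (static_cast<uint64_t>(regs[0]) << 32) | regs[1];
  assert(result == UINT64_C(0x1ffffffff) + UINT64_C(0x200000001));
  printf("sum = 0x%llx\n", static_cast<unsigned long long>(result));
  return 0;
}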
StoreValue(rl_dest, rl_result); @@ -1512,9 +1512,9 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, rl_src1 = LoadValue(rl_src1, kCoreReg); rl_src2 = LoadValue(rl_src2, kCoreReg); if (check_zero) { - GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero); + GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero); } - rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv); + rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv); done = true; } else if (cu_->instruction_set == kThumb2) { if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) { @@ -1523,9 +1523,9 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, rl_src1 = LoadValue(rl_src1, kCoreReg); rl_src2 = LoadValue(rl_src2, kCoreReg); if (check_zero) { - GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero); + GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero); } - rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv); + rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv); done = true; } } @@ -1585,29 +1585,29 @@ bool Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div, int t_reg = AllocTemp(); if (lit == 2) { // Division by 2 is by far the most common division by constant. - OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k); - OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg); - OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k); + OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), 32 - k); + OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg()); + OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k); } else { - OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31); + OpRegRegImm(kOpAsr, t_reg, rl_src.reg.GetReg(), 31); OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k); - OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg); - OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k); + OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg()); + OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k); } } else { int t_reg1 = AllocTemp(); int t_reg2 = AllocTemp(); if (lit == 2) { - OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k); - OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg); + OpRegRegImm(kOpLsr, t_reg1, rl_src.reg.GetReg(), 32 - k); + OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg()); OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit -1); - OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1); + OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1); } else { - OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31); + OpRegRegImm(kOpAsr, t_reg1, rl_src.reg.GetReg(), 31); OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k); - OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg); + OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg()); OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1); - OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1); + OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1); } } StoreValue(rl_dest, rl_result); @@ -1637,7 +1637,7 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (power_of_two) { // Shift. - OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit)); + OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), LowestSetBit(lit)); } else if (pop_count_le2) { // Shift and add and shift. 
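HandleEasyDivRem above strength-reduces signed division and remainder by a power of two: add a bias derived from the sign bits, then shift (for division) or mask and subtract (for remainder). The same arithmetic written as a standalone check, assuming two's-complement arithmetic and arithmetic right shift of signed values, as on the targets discussed here; this is an illustration, not ART code.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Signed division and remainder by 2^k without a divide instruction, using the
// sign-derived bias the generated code builds with kOpAsr/kOpLsr/kOpAdd.
static int32_t DivPow2(int32_t x, int k) {
  uint32_t sign = static_cast<uint32_t>(x >> 31);               // kOpAsr ..., 31
  uint32_t t = static_cast<uint32_t>(x) + (sign >> (32 - k));   // add the k rounding bits
  return static_cast<int32_t>(t) >> k;                          // kOpAsr ..., k
}

static int32_t RemPow2(int32_t x, int k) {
  uint32_t bias = static_cast<uint32_t>(x >> 31) >> (32 - k);
  uint32_t t = (static_cast<uint32_t>(x) + bias) & ((1u << k) - 1);  // kOpAnd ..., lit - 1
  return static_cast<int32_t>(t - bias);                             // kOpSub
}

int main() {
  for (int32_t x = -1000; x <= 1000; ++x) {
    for (int k = 1; k <= 4; ++k) {
      assert(DivPow2(x, k) == x / (1 << k));
      assert(RemPow2(x, k) == x % (1 << k));
    }
  }
  printf("ok\n");
  return 0;
}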
int first_bit = LowestSetBit(lit); @@ -1648,8 +1648,8 @@ bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int li DCHECK(power_of_two_minus_one); // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1) int t_reg = AllocTemp(); - OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1)); - OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg); + OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), LowestSetBit(lit + 1)); + OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetReg()); } StoreValue(rl_dest, rl_result); return true; @@ -1668,10 +1668,10 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re rl_src = LoadValue(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); if (cu_->instruction_set == kThumb2) { - OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit); + OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit); } else { - OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg); - OpRegImm(kOpAdd, rl_result.low_reg, lit); + OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg()); + OpRegImm(kOpAdd, rl_result.reg.GetReg(), lit); } StoreValue(rl_dest, rl_result); return; @@ -1764,7 +1764,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re bool done = false; if (cu_->instruction_set == kMips) { rl_src = LoadValue(rl_src, kCoreReg); - rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div); + rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div); done = true; } else if (cu_->instruction_set == kX86) { rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div); @@ -1774,7 +1774,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re // Use ARM SDIV instruction for division. For remainder we also need to // calculate using a MUL and subtract. rl_src = LoadValue(rl_src, kCoreReg); - rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div); + rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div); done = true; } } @@ -1800,9 +1800,9 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re rl_result = EvalLoc(rl_dest, kCoreReg, true); // Avoid shifts by literal 0 - no support in Thumb. Change to copy. 
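HandleEasyMultiply above recognizes a few cheap multiplier shapes: a power of two becomes a shift, and 2^n - 1 becomes shift-then-subtract (the lsl/sub pair in the diff). The equivalences as a small standalone test; illustration only, not ART code.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Multiply by 2^n as a shift, and by 2^n - 1 as shift-then-subtract.
static int32_t MulPow2(int32_t x, int n) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) << n);
}

static int32_t MulPow2MinusOne(int32_t x, int n) {
  uint32_t t = static_cast<uint32_t>(x) << n;                   // kOpLsl, LowestSetBit(lit + 1)
  return static_cast<int32_t>(t - static_cast<uint32_t>(x));    // kOpSub
}

int main() {
  for (int32_t x = -100; x <= 100; ++x) {
    assert(MulPow2(x, 3) == x * 8);
    assert(MulPow2MinusOne(x, 3) == x * 7);    // lit = 7: LowestSetBit(8) == 3
    assert(MulPow2MinusOne(x, 4) == x * 15);
  }
  printf("ok\n");
  return 0;
}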
if (shift_op && (lit == 0)) { - OpRegCopy(rl_result.low_reg, rl_src.low_reg); + OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg()); } else { - OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit); + OpRegRegImm(op, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit); } StoreValue(rl_dest, rl_result); } @@ -1822,15 +1822,15 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, rl_src2 = LoadValueWide(rl_src2, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); // Check for destructive overlap - if (rl_result.low_reg == rl_src2.high_reg) { + if (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg()) { int t_reg = AllocTemp(); - OpRegCopy(t_reg, rl_src2.high_reg); - OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg); - OpRegReg(kOpMvn, rl_result.high_reg, t_reg); + OpRegCopy(t_reg, rl_src2.reg.GetHighReg()); + OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg()); + OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), t_reg); FreeTemp(t_reg); } else { - OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg); - OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg); + OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg()); + OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), rl_src2.reg.GetHighReg()); } StoreValueWide(rl_dest, rl_result); return; @@ -2003,7 +2003,7 @@ void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { /* Generic code for generating a wide constant into a VR. */ void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) { RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true); - LoadConstantWide(rl_result.low_reg, rl_result.high_reg, value); + LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), value); StoreValueWide(rl_dest, rl_result); } diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 35d193c65c..dd3d466d94 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -284,9 +284,9 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) { */ RegLocation rl_src = rl_method; rl_src.location = kLocPhysReg; - rl_src.low_reg = TargetReg(kArg0); + rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0)); rl_src.home = false; - MarkLive(rl_src.low_reg, rl_src.s_reg_low); + MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low); StoreValue(rl_method, rl_src); // If Method* has been promoted, explicitly flush if (rl_method.location == kLocPhysReg) { @@ -680,7 +680,7 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, // Wide spans, we need the 2nd half of uses[2]. 
rl_arg = UpdateLocWide(rl_use2); if (rl_arg.location == kLocPhysReg) { - reg = rl_arg.high_reg; + reg = rl_arg.reg.GetHighReg(); } else { // kArg2 & rArg3 can safely be used here reg = TargetReg(kArg3); @@ -701,8 +701,10 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, rl_arg = info->args[next_use]; rl_arg = UpdateRawLoc(rl_arg); if (rl_arg.location == kLocPhysReg) { - low_reg = rl_arg.low_reg; - high_reg = rl_arg.high_reg; + low_reg = rl_arg.reg.GetReg(); + if (rl_arg.wide) { + high_reg = rl_arg.reg.GetHighReg(); + } } else { low_reg = TargetReg(kArg2); if (rl_arg.wide) { @@ -775,14 +777,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, loc = UpdateLocWide(loc); if ((next_arg >= 2) && (loc.location == kLocPhysReg)) { StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low), - loc.low_reg, loc.high_reg); + loc.reg.GetReg(), loc.reg.GetHighReg()); } next_arg += 2; } else { loc = UpdateLoc(loc); if ((next_arg >= 3) && (loc.location == kLocPhysReg)) { StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), - loc.low_reg, kWord); + loc.reg.GetReg(), kWord); } next_arg++; } @@ -983,7 +985,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { rl_idx = LoadValue(rl_idx, kCoreReg); } int reg_max; - GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags); + GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags); bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK)); LIR* launch_pad = NULL; int reg_off = INVALID_REG; @@ -993,15 +995,15 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { reg_ptr = AllocTemp(); if (range_check) { reg_max = AllocTemp(); - LoadWordDisp(rl_obj.low_reg, count_offset, reg_max); + LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max); } - LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off); - LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr); + LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off); + LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr); if (range_check) { // Set up a launch pad to allow retry in case of bounds violation */ launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info)); intrinsic_launchpads_.Insert(launch_pad); - OpRegReg(kOpCmp, rl_idx.low_reg, reg_max); + OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max); FreeTemp(reg_max); OpCondBranch(kCondUge, launch_pad); } @@ -1013,33 +1015,33 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) { launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info)); intrinsic_launchpads_.Insert(launch_pad); if (rl_idx.is_const) { - OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.low_reg, count_offset, + OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset, mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad); } else { - OpRegMem(kOpCmp, rl_idx.low_reg, rl_obj.low_reg, count_offset); + OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset); OpCondBranch(kCondUge, launch_pad); } } reg_off = AllocTemp(); reg_ptr = AllocTemp(); - LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off); - LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr); + LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off); + LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr); } if (rl_idx.is_const) { OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg)); } else { - OpRegReg(kOpAdd, reg_off, rl_idx.low_reg); + OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg()); } - FreeTemp(rl_obj.low_reg); - if (rl_idx.low_reg != INVALID_REG) { - FreeTemp(rl_idx.low_reg); + FreeTemp(rl_obj.reg.GetReg()); + if 
(rl_idx.location == kLocPhysReg) { + FreeTemp(rl_idx.reg.GetReg()); } RegLocation rl_dest = InlineTarget(info); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (cu_->instruction_set != kX86) { - LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf); + LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf); } else { - LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.low_reg, + LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(), INVALID_REG, kUnsignedHalf, INVALID_SREG); } FreeTemp(reg_off); @@ -1064,18 +1066,18 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) { rl_obj = LoadValue(rl_obj, kCoreReg); RegLocation rl_dest = InlineTarget(info); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags); - LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg); + GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags); + LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(), rl_result.reg.GetReg()); if (is_empty) { // dst = (dst == 0); if (cu_->instruction_set == kThumb2) { int t_reg = AllocTemp(); - OpRegReg(kOpNeg, t_reg, rl_result.low_reg); - OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg); + OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg()); + OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg); } else { DCHECK_EQ(cu_->instruction_set, kX86); - OpRegImm(kOpSub, rl_result.low_reg, 1); - OpRegImm(kOpLsr, rl_result.low_reg, 31); + OpRegImm(kOpSub, rl_result.reg.GetReg(), 1); + OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31); } } StoreValue(rl_dest, rl_result); @@ -1092,15 +1094,15 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (size == kLong) { RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg); - int r_i_low = rl_i.low_reg; - if (rl_i.low_reg == rl_result.low_reg) { - // First REV shall clobber rl_result.low_reg, save the value in a temp for the second REV. + int r_i_low = rl_i.reg.GetReg(); + if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) { + // First REV shall clobber rl_result.reg.GetReg(), save the value in a temp for the second REV. r_i_low = AllocTemp(); - OpRegCopy(r_i_low, rl_i.low_reg); + OpRegCopy(r_i_low, rl_i.reg.GetReg()); } - OpRegReg(kOpRev, rl_result.low_reg, rl_i.high_reg); - OpRegReg(kOpRev, rl_result.high_reg, r_i_low); - if (rl_i.low_reg == rl_result.low_reg) { + OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg()); + OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low); + if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) { FreeTemp(r_i_low); } StoreValueWide(rl_dest, rl_result); @@ -1108,7 +1110,7 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) { DCHECK(size == kWord || size == kSignedHalf); OpKind op = (size == kWord) ? kOpRev : kOpRevsh; RegLocation rl_i = LoadValue(rl_src_i, kCoreReg); - OpRegReg(op, rl_result.low_reg, rl_i.low_reg); + OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg()); StoreValue(rl_dest, rl_result); } return true; @@ -1125,9 +1127,9 @@ bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int sign_reg = AllocTemp(); // abs(x) = y<=x>>31, (x+y)^y. 
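The comment above spells out the branch-free abs() idiom used by GenInlinedAbsInt (implemented in the lines that follow) and, with an add/adc pair, by GenInlinedAbsLong: y = x >> 31 is all ones exactly when x is negative, and (x + y) ^ y is then |x|. A standalone rendering of both widths, assuming arithmetic right shift for the 32-bit case; not ART code.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Branch-free abs(): y = x >> 31 is an all-ones mask for negative x, and
// (x + y) ^ y flips and increments exactly when needed.  The 64-bit version
// applies the same mask to both halves and propagates the carry of the low
// add, mirroring the kOpAdd/kOpAdc/kOpXor sequence in the diff.
static int32_t Abs32(int32_t x) {
  uint32_t y = static_cast<uint32_t>(x >> 31);            // sign_reg
  return static_cast<int32_t>((static_cast<uint32_t>(x) + y) ^ y);
}

static int64_t Abs64(int64_t x) {
  uint64_t ux = static_cast<uint64_t>(x);
  uint32_t lo = static_cast<uint32_t>(ux);
  uint32_t hi = static_cast<uint32_t>(ux >> 32);
  uint32_t sign = (hi & 0x80000000u) ? 0xffffffffu : 0u;  // asr of the high half
  uint64_t lo_sum = static_cast<uint64_t>(lo) + sign;     // kOpAdd
  uint32_t new_lo = static_cast<uint32_t>(lo_sum);
  uint32_t new_hi = hi + sign + static_cast<uint32_t>(lo_sum >> 32);  // kOpAdc
  new_lo ^= sign;                                          // kOpXor (low)
  new_hi ^= sign;                                          // kOpXor (high)
  return static_cast<int64_t>((static_cast<uint64_t>(new_hi) << 32) | new_lo);
}

int main() {
  assert(Abs32(-7) == 7 && Abs32(7) == 7 && Abs32(0) == 0);
  assert(Abs64(INT64_C(-0x123456789)) == INT64_C(0x123456789));
  assert(Abs64(-1) == 1 && Abs64(42) == 42);
  printf("ok\n");
  return 0;
}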
- OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31); - OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg); - OpRegReg(kOpXor, rl_result.low_reg, sign_reg); + OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31); + OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg); + OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg); StoreValue(rl_dest, rl_result); return true; } @@ -1144,11 +1146,11 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int sign_reg = AllocTemp(); // abs(x) = y<=x>>31, (x+y)^y. - OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31); - OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg); - OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg); - OpRegReg(kOpXor, rl_result.low_reg, sign_reg); - OpRegReg(kOpXor, rl_result.high_reg, sign_reg); + OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31); + OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg); + OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg); + OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg); + OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg); StoreValueWide(rl_dest, rl_result); return true; } else { @@ -1158,16 +1160,16 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) { rl_src = LoadValueWide(rl_src, kCoreReg); RegLocation rl_dest = InlineTargetWide(info); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg); - FreeTemp(rl_src.low_reg); - FreeTemp(rl_src.high_reg); + OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); + FreeTemp(rl_src.reg.GetReg()); + FreeTemp(rl_src.reg.GetHighReg()); int sign_reg = AllocTemp(); // abs(x) = y<=x>>31, (x+y)^y. 
- OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31); - OpRegReg(kOpAdd, rl_result.low_reg, sign_reg); - OpRegReg(kOpAdc, rl_result.high_reg, sign_reg); - OpRegReg(kOpXor, rl_result.low_reg, sign_reg); - OpRegReg(kOpXor, rl_result.high_reg, sign_reg); + OpRegRegImm(kOpAsr, sign_reg, rl_result.reg.GetHighReg(), 31); + OpRegReg(kOpAdd, rl_result.reg.GetReg(), sign_reg); + OpRegReg(kOpAdc, rl_result.reg.GetHighReg(), sign_reg); + OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg); + OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg); StoreValueWide(rl_dest, rl_result); return true; } @@ -1184,7 +1186,7 @@ bool Mir2Lir::GenInlinedAbsFloat(CallInfo* info) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int signMask = AllocTemp(); LoadConstant(signMask, 0x7fffffff); - OpRegRegReg(kOpAnd, rl_result.low_reg, rl_src.low_reg, signMask); + OpRegRegReg(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), signMask); FreeTemp(signMask); StoreValue(rl_dest, rl_result); return true; @@ -1199,12 +1201,12 @@ bool Mir2Lir::GenInlinedAbsDouble(CallInfo* info) { rl_src = LoadValueWide(rl_src, kCoreReg); RegLocation rl_dest = InlineTargetWide(info); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg); - FreeTemp(rl_src.low_reg); - FreeTemp(rl_src.high_reg); + OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); + FreeTemp(rl_src.reg.GetReg()); + FreeTemp(rl_src.reg.GetHighReg()); int signMask = AllocTemp(); LoadConstant(signMask, 0x7fffffff); - OpRegReg(kOpAnd, rl_result.high_reg, signMask); + OpRegReg(kOpAnd, rl_result.reg.GetHighReg(), signMask); FreeTemp(signMask); StoreValueWide(rl_dest, rl_result); return true; @@ -1316,10 +1318,10 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); ThreadOffset offset = Thread::PeerOffset(); if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) { - LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg); + LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg()); } else { CHECK(cu_->instruction_set == kX86); - reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset); + reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset); } StoreValue(rl_dest, rl_result); return true; @@ -1343,11 +1345,11 @@ bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info, RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (is_long) { - OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg); - LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG); + OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg()); + LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG); StoreValueWide(rl_dest, rl_result); } else { - LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord); + LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(), 0, kWord); StoreValue(rl_dest, rl_result); } return true; @@ -1372,20 +1374,20 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, RegLocation rl_value; if (is_long) { rl_value = LoadValueWide(rl_src_value, kCoreReg); - OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg); - 
StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg); + OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg()); + StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg()); } else { rl_value = LoadValue(rl_src_value, kCoreReg); - StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord); + StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(), 0, kWord); } // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard. - FreeTemp(rl_offset.low_reg); + FreeTemp(rl_offset.reg.GetReg()); if (is_volatile) { GenMemBarrier(kStoreLoad); } if (is_object) { - MarkGCCard(rl_value.low_reg, rl_object.low_reg); + MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg()); } return true; } diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc index f7c2821afd..53c47f5975 100644 --- a/compiler/dex/quick/gen_loadstore.cc +++ b/compiler/dex/quick/gen_loadstore.cc @@ -92,7 +92,7 @@ LIR* Mir2Lir::StoreWordDisp(int rBase, int displacement, int r_src) { void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) { rl_src = UpdateLoc(rl_src); if (rl_src.location == kLocPhysReg) { - OpRegCopy(r_dest, rl_src.low_reg); + OpRegCopy(r_dest, rl_src.reg.GetReg()); } else if (IsInexpensiveConstant(rl_src)) { LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src)); } else { @@ -122,7 +122,7 @@ void Mir2Lir::LoadValueDirectWide(RegLocation rl_src, int reg_lo, int reg_hi) { rl_src = UpdateLocWide(rl_src); if (rl_src.location == kLocPhysReg) { - OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg); + OpRegCopyWide(reg_lo, reg_hi, rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); } else if (IsInexpensiveConstant(rl_src)) { LoadConstantWide(reg_lo, reg_hi, mir_graph_->ConstantValueWide(rl_src)); } else { @@ -150,9 +150,9 @@ void Mir2Lir::LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) { rl_src = EvalLoc(rl_src, op_kind, false); if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) { - LoadValueDirect(rl_src, rl_src.low_reg); + LoadValueDirect(rl_src, rl_src.reg.GetReg()); rl_src.location = kLocPhysReg; - MarkLive(rl_src.low_reg, rl_src.s_reg_low); + MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low); } return rl_src; } @@ -175,34 +175,34 @@ void Mir2Lir::StoreValue(RegLocation rl_dest, RegLocation rl_src) { rl_src = UpdateLoc(rl_src); rl_dest = UpdateLoc(rl_dest); if (rl_src.location == kLocPhysReg) { - if (IsLive(rl_src.low_reg) || - IsPromoted(rl_src.low_reg) || + if (IsLive(rl_src.reg.GetReg()) || + IsPromoted(rl_src.reg.GetReg()) || (rl_dest.location == kLocPhysReg)) { // Src is live/promoted or Dest has assigned reg. rl_dest = EvalLoc(rl_dest, kAnyReg, false); - OpRegCopy(rl_dest.low_reg, rl_src.low_reg); + OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg()); } else { // Just re-assign the registers. 
Dest gets Src's regs - rl_dest.low_reg = rl_src.low_reg; - Clobber(rl_src.low_reg); + rl_dest.reg = rl_src.reg; + Clobber(rl_src.reg.GetReg()); } } else { // Load Src either into promoted Dest or temps allocated for Dest rl_dest = EvalLoc(rl_dest, kAnyReg, false); - LoadValueDirect(rl_src, rl_dest.low_reg); + LoadValueDirect(rl_src, rl_dest.reg.GetReg()); } // Dest is now live and dirty (until/if we flush it to home location) - MarkLive(rl_dest.low_reg, rl_dest.s_reg_low); + MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low); MarkDirty(rl_dest); ResetDefLoc(rl_dest); - if (IsDirty(rl_dest.low_reg) && + if (IsDirty(rl_dest.reg.GetReg()) && oat_live_out(rl_dest.s_reg_low)) { def_start = last_lir_insn_; StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), - rl_dest.low_reg, kWord); + rl_dest.reg.GetReg(), kWord); MarkClean(rl_dest); def_end = last_lir_insn_; if (!rl_dest.ref) { @@ -216,10 +216,10 @@ RegLocation Mir2Lir::LoadValueWide(RegLocation rl_src, RegisterClass op_kind) { DCHECK(rl_src.wide); rl_src = EvalLoc(rl_src, op_kind, false); if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) { - LoadValueDirectWide(rl_src, rl_src.low_reg, rl_src.high_reg); + LoadValueDirectWide(rl_src, rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); rl_src.location = kLocPhysReg; - MarkLive(rl_src.low_reg, rl_src.s_reg_low); - MarkLive(rl_src.high_reg, GetSRegHi(rl_src.s_reg_low)); + MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low); + MarkLive(rl_src.reg.GetHighReg(), GetSRegHi(rl_src.s_reg_low)); } return rl_src; } @@ -237,57 +237,57 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) { } LIR* def_start; LIR* def_end; - DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg)); + DCHECK((rl_src.location != kLocPhysReg) || + (IsFpReg(rl_src.reg.GetReg()) == IsFpReg(rl_src.reg.GetHighReg()))); DCHECK(rl_dest.wide); DCHECK(rl_src.wide); if (rl_src.location == kLocPhysReg) { - if (IsLive(rl_src.low_reg) || - IsLive(rl_src.high_reg) || - IsPromoted(rl_src.low_reg) || - IsPromoted(rl_src.high_reg) || + if (IsLive(rl_src.reg.GetReg()) || + IsLive(rl_src.reg.GetHighReg()) || + IsPromoted(rl_src.reg.GetReg()) || + IsPromoted(rl_src.reg.GetHighReg()) || (rl_dest.location == kLocPhysReg)) { // Src is live or promoted or Dest has assigned reg. rl_dest = EvalLoc(rl_dest, kAnyReg, false); - OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, - rl_src.low_reg, rl_src.high_reg); + OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), + rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); } else { // Just re-assign the registers. Dest gets Src's regs - rl_dest.low_reg = rl_src.low_reg; - rl_dest.high_reg = rl_src.high_reg; - Clobber(rl_src.low_reg); - Clobber(rl_src.high_reg); + rl_dest.reg = rl_src.reg; + Clobber(rl_src.reg.GetReg()); + Clobber(rl_src.reg.GetHighReg()); } } else { // Load Src either into promoted Dest or temps allocated for Dest rl_dest = EvalLoc(rl_dest, kAnyReg, false); - LoadValueDirectWide(rl_src, rl_dest.low_reg, rl_dest.high_reg); + LoadValueDirectWide(rl_src, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg()); } // Dest is now live and dirty (until/if we flush it to home location) - MarkLive(rl_dest.low_reg, rl_dest.s_reg_low); + MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low); // Does this wide value live in two registers (or one vector one)? 
- if (rl_dest.low_reg != rl_dest.high_reg) { - MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low)); + if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) { + MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low)); MarkDirty(rl_dest); - MarkPair(rl_dest.low_reg, rl_dest.high_reg); + MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg()); } else { // This must be an x86 vector register value, - DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86)); + DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86)); MarkDirty(rl_dest); } ResetDefLocWide(rl_dest); - if ((IsDirty(rl_dest.low_reg) || - IsDirty(rl_dest.high_reg)) && + if ((IsDirty(rl_dest.reg.GetReg()) || + IsDirty(rl_dest.reg.GetHighReg())) && (oat_live_out(rl_dest.s_reg_low) || oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) { def_start = last_lir_insn_; DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1), mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low))); StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), - rl_dest.low_reg, rl_dest.high_reg); + rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg()); MarkClean(rl_dest); def_end = last_lir_insn_; MarkDefWide(rl_dest, def_start, def_end); @@ -298,25 +298,25 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) { DCHECK_EQ(rl_src.location, kLocPhysReg); if (rl_dest.location == kLocPhysReg) { - OpRegCopy(rl_dest.low_reg, rl_src.low_reg); + OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg()); } else { // Just re-assign the register. Dest gets Src's reg. - rl_dest.low_reg = rl_src.low_reg; rl_dest.location = kLocPhysReg; - Clobber(rl_src.low_reg); + rl_dest.reg = rl_src.reg; + Clobber(rl_src.reg.GetReg()); } // Dest is now live and dirty (until/if we flush it to home location) - MarkLive(rl_dest.low_reg, rl_dest.s_reg_low); + MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low); MarkDirty(rl_dest); ResetDefLoc(rl_dest); - if (IsDirty(rl_dest.low_reg) && + if (IsDirty(rl_dest.reg.GetReg()) && oat_live_out(rl_dest.s_reg_low)) { LIR *def_start = last_lir_insn_; StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), - rl_dest.low_reg, kWord); + rl_dest.reg.GetReg(), kWord); MarkClean(rl_dest); LIR *def_end = last_lir_insn_; if (!rl_dest.ref) { @@ -327,46 +327,45 @@ void Mir2Lir::StoreFinalValue(RegLocation rl_dest, RegLocation rl_src) { } void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) { - DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg)); + DCHECK_EQ(IsFpReg(rl_src.reg.GetReg()), IsFpReg(rl_src.reg.GetHighReg())); DCHECK(rl_dest.wide); DCHECK(rl_src.wide); DCHECK_EQ(rl_src.location, kLocPhysReg); if (rl_dest.location == kLocPhysReg) { - OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, rl_src.low_reg, rl_src.high_reg); + OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); } else { // Just re-assign the registers. Dest gets Src's regs. - rl_dest.low_reg = rl_src.low_reg; - rl_dest.high_reg = rl_src.high_reg; rl_dest.location = kLocPhysReg; - Clobber(rl_src.low_reg); - Clobber(rl_src.high_reg); + rl_dest.reg = rl_src.reg; + Clobber(rl_src.reg.GetReg()); + Clobber(rl_src.reg.GetHighReg()); } // Dest is now live and dirty (until/if we flush it to home location). - MarkLive(rl_dest.low_reg, rl_dest.s_reg_low); + MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low); // Does this wide value live in two registers (or one vector one)? 
- if (rl_dest.low_reg != rl_dest.high_reg) { - MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low)); + if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) { + MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low)); MarkDirty(rl_dest); - MarkPair(rl_dest.low_reg, rl_dest.high_reg); + MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg()); } else { // This must be an x86 vector register value, - DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86)); + DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86)); MarkDirty(rl_dest); } ResetDefLocWide(rl_dest); - if ((IsDirty(rl_dest.low_reg) || - IsDirty(rl_dest.high_reg)) && + if ((IsDirty(rl_dest.reg.GetReg()) || + IsDirty(rl_dest.reg.GetHighReg())) && (oat_live_out(rl_dest.s_reg_low) || oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) { LIR *def_start = last_lir_insn_; DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1), mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low))); StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low), - rl_dest.low_reg, rl_dest.high_reg); + rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg()); MarkClean(rl_dest); LIR *def_end = last_lir_insn_; MarkDefWide(rl_dest, def_start, def_end); @@ -385,14 +384,13 @@ RegLocation Mir2Lir::LoadCurrMethod() { RegLocation Mir2Lir::ForceTemp(RegLocation loc) { DCHECK(!loc.wide); DCHECK(loc.location == kLocPhysReg); - DCHECK(!IsFpReg(loc.low_reg)); - DCHECK(!IsFpReg(loc.high_reg)); - if (IsTemp(loc.low_reg)) { - Clobber(loc.low_reg); + DCHECK(!IsFpReg(loc.reg.GetReg())); + if (IsTemp(loc.reg.GetReg())) { + Clobber(loc.reg.GetReg()); } else { int temp_low = AllocTemp(); - OpRegCopy(temp_low, loc.low_reg); - loc.low_reg = temp_low; + OpRegCopy(temp_low, loc.reg.GetReg()); + loc.reg.SetReg(temp_low); } // Ensure that this doesn't represent the original SR any more. @@ -403,21 +401,21 @@ RegLocation Mir2Lir::ForceTemp(RegLocation loc) { RegLocation Mir2Lir::ForceTempWide(RegLocation loc) { DCHECK(loc.wide); DCHECK(loc.location == kLocPhysReg); - DCHECK(!IsFpReg(loc.low_reg)); - DCHECK(!IsFpReg(loc.high_reg)); - if (IsTemp(loc.low_reg)) { - Clobber(loc.low_reg); + DCHECK(!IsFpReg(loc.reg.GetReg())); + DCHECK(!IsFpReg(loc.reg.GetHighReg())); + if (IsTemp(loc.reg.GetReg())) { + Clobber(loc.reg.GetReg()); } else { int temp_low = AllocTemp(); - OpRegCopy(temp_low, loc.low_reg); - loc.low_reg = temp_low; + OpRegCopy(temp_low, loc.reg.GetReg()); + loc.reg.SetReg(temp_low); } - if (IsTemp(loc.high_reg)) { - Clobber(loc.high_reg); + if (IsTemp(loc.reg.GetHighReg())) { + Clobber(loc.reg.GetHighReg()); } else { int temp_high = AllocTemp(); - OpRegCopy(temp_high, loc.high_reg); - loc.high_reg = temp_high; + OpRegCopy(temp_high, loc.reg.GetHighReg()); + loc.reg.SetHighReg(temp_high); } // Ensure that this doesn't represent the original SR any more. 
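The gen_loadstore.cc changes above keep the same StoreValue policy as before, only expressed through the new reg field: if the source register is still live or promoted, or the destination already owns a register, emit a copy; otherwise the destination simply takes over the source's register. A toy model of that decision follows; every type, field, and register number is invented for illustration, and only the copy-versus-reassign choice mirrors the diff.

#include <cstdio>

struct Loc {
  bool in_phys_reg = false;
  bool live = false;
  bool promoted = false;
  int reg = -1;
};

static int next_temp = 10;

static void EnsureReg(Loc& loc) {               // stand-in for EvalLoc/AllocTemp
  if (!loc.in_phys_reg) {
    loc.reg = next_temp++;
    loc.in_phys_reg = true;
  }
}

static void StoreValueSketch(Loc& dest, const Loc& src) {
  if (src.in_phys_reg) {
    if (src.live || src.promoted || dest.in_phys_reg) {
      EnsureReg(dest);
      printf("OpRegCopy r%d <- r%d\n", dest.reg, src.reg);   // copy is required
    } else {
      dest.reg = src.reg;                       // "Dest gets Src's regs"
      dest.in_phys_reg = true;
      printf("reassign: dest now owns r%d, old binding clobbered\n", src.reg);
    }
  } else {
    EnsureReg(dest);
    printf("load r%d from src's home slot\n", dest.reg);
  }
  // From here on dest is live and dirty until/if it is flushed home.
}

int main() {
  Loc promoted_src{true, false, true, 4};
  Loc dest1;
  StoreValueSketch(dest1, promoted_src);        // source promoted: copy
  Loc dead_temp_src{true, false, false, 7};
  Loc dest2;
  StoreValueSketch(dest2, dead_temp_src);       // dead temp source: reassign
  return 0;
}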
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index a663519b82..88f46fd59a 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -114,7 +114,7 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL); LoadWordDisp(rBase, 0, r_key); OpRegImm(kOpAdd, rBase, 8); - OpCmpBranch(kCondNe, rl_src.low_reg, r_key, loop_label); + OpCmpBranch(kCondNe, rl_src.reg.GetReg(), r_key, loop_label); int r_disp = AllocTemp(); LoadWordDisp(rBase, -4, r_disp); OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp); @@ -162,7 +162,7 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, bool large_bias = false; int r_key; if (low_key == 0) { - r_key = rl_src.low_reg; + r_key = rl_src.reg.GetReg(); } else if ((low_key & 0xffff) != low_key) { r_key = AllocTemp(); LoadConstant(r_key, low_key); @@ -179,9 +179,9 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, NewLIR0(kMipsNop); } else { if (large_bias) { - OpRegRegReg(kOpSub, r_key, rl_src.low_reg, r_key); + OpRegRegReg(kOpSub, r_key, rl_src.reg.GetReg(), r_key); } else { - OpRegRegImm(kOpSub, r_key, rl_src.low_reg, low_key); + OpRegRegImm(kOpSub, r_key, rl_src.reg.GetReg(), low_key); } } GenBarrier(); // Scheduling barrier @@ -263,7 +263,7 @@ void MipsMir2Lir::GenMoveException(RegLocation rl_dest) { int ex_offset = Thread::ExceptionOffset().Int32Value(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int reset_reg = AllocTemp(); - LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.low_reg); + LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.reg.GetReg()); LoadConstant(reset_reg, 0); StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg); FreeTemp(reset_reg); diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h index dad8a3b492..61eb68dc21 100644 --- a/compiler/dex/quick/mips/codegen_mips.h +++ b/compiler/dex/quick/mips/codegen_mips.h @@ -49,7 +49,7 @@ class MipsMir2Lir : public Mir2Lir { bool IsFpReg(int reg); bool SameRegType(int reg1, int reg2); int AllocTypedTemp(bool fp_hint, int reg_class); - int AllocTypedTempPair(bool fp_hint, int reg_class); + RegStorage AllocTypedTempWide(bool fp_hint, int reg_class); int S2d(int low_reg, int high_reg); int TargetReg(SpecialTargetRegister reg); int GetArgMappingToPhysicalReg(int arg_num); diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 9e2fea94de..cf4f19f84c 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -64,7 +64,7 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, rl_src1 = LoadValue(rl_src1, kFPReg); rl_src2 = LoadValue(rl_src2, kFPReg); rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg); + NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); StoreValue(rl_dest, rl_result); } @@ -111,8 +111,8 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, rl_result = EvalLoc(rl_dest, kFPReg, true); DCHECK(rl_dest.wide); DCHECK(rl_result.wide); - NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg), - S2d(rl_src2.low_reg, rl_src2.high_reg)); + NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()), + S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg())); StoreValueWide(rl_dest, rl_result); } @@ -157,18 +157,18 @@ void 
MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, } if (rl_src.wide) { rl_src = LoadValueWide(rl_src, kFPReg); - src_reg = S2d(rl_src.low_reg, rl_src.high_reg); + src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()); } else { rl_src = LoadValue(rl_src, kFPReg); - src_reg = rl_src.low_reg; + src_reg = rl_src.reg.GetReg(); } if (rl_dest.wide) { rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg); + NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg); StoreValueWide(rl_dest, rl_result); } else { rl_result = EvalLoc(rl_dest, kFPReg, true); - NewLIR2(op, rl_result.low_reg, src_reg); + NewLIR2(op, rl_result.reg.GetReg(), src_reg); StoreValue(rl_dest, rl_result); } } @@ -221,7 +221,7 @@ void MipsMir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValue(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000); + OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000); StoreValue(rl_dest, rl_result); } @@ -229,8 +229,8 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) { RegLocation rl_result; rl_src = LoadValueWide(rl_src, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000); - OpRegCopy(rl_result.low_reg, rl_src.low_reg); + OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000); + OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg()); StoreValueWide(rl_dest, rl_result); } diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index 013041a9a5..fec801bb4a 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -47,13 +47,13 @@ void MipsMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, int t0 = AllocTemp(); int t1 = AllocTemp(); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - NewLIR3(kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg); - NewLIR3(kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg); - NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0); - LIR* branch = OpCmpImmBranch(kCondNe, rl_result.low_reg, 0, NULL); - NewLIR3(kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg); - NewLIR3(kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg); - NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0); + NewLIR3(kMipsSlt, t0, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg()); + NewLIR3(kMipsSlt, t1, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg()); + NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0); + LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg.GetReg(), 0, NULL); + NewLIR3(kMipsSltu, t0, rl_src1.reg.GetReg(), rl_src2.reg.GetReg()); + NewLIR3(kMipsSltu, t1, rl_src2.reg.GetReg(), rl_src1.reg.GetReg()); + NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0); FreeTemp(t0); FreeTemp(t1); LIR* target = NewLIR0(kPseudoTargetLabel); @@ -228,9 +228,9 @@ RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2, NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); if (is_div) { - NewLIR2(kMipsMflo, rl_result.low_reg, r_LO); + NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO); } else { - NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI); + NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI); } return rl_result; } @@ -242,9 +242,9 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, 
                                      int reg1, int lit,
   NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (is_div) {
-    NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
+    NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
   } else {
-    NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
+    NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
   }
   FreeTemp(t_reg);
   return rl_result;
@@ -290,7 +290,7 @@ bool MipsMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
   RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   DCHECK(size == kSignedByte);
-  LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+  LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
   StoreValue(rl_dest, rl_result);
   return true;
 }
@@ -306,7 +306,7 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
   RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
   DCHECK(size == kSignedByte);
   RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
-  StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+  StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
   return true;
 }
@@ -329,11 +329,11 @@ void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
   int t_reg = AllocTemp();
-  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
-  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+  OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
   FreeTemp(t_reg);
   if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+    OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
   }
 }
@@ -385,11 +385,11 @@ void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
    *  addu v1,v1,t1
    */
 
-  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
+  OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
   int t_reg = AllocTemp();
-  OpRegRegReg(kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
-  NewLIR3(kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
-  OpRegRegReg(kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
+  OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
+  NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+  OpRegRegReg(kOpAdd, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
   FreeTemp(t_reg);
   StoreValueWide(rl_dest, rl_result);
 }
@@ -408,10 +408,10 @@ void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
    */
 
   int t_reg = AllocTemp();
-  NewLIR3(kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
-  OpRegRegReg(kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
-  OpRegRegReg(kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
-  OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+  NewLIR3(kMipsSltu, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  OpRegRegReg(kOpSub, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+  OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+  OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
   FreeTemp(t_reg);
   StoreValueWide(rl_dest, rl_result);
 }
@@ -427,11 +427,11 @@ void MipsMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
    *  subu v1,v1,t1
    */
 
-  OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
-  OpRegReg(kOpNeg, rl_result.high_reg, rl_src.high_reg);
+  OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+  OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
   int t_reg = AllocTemp();
-  NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
-  OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
+  NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.reg.GetReg());
+  OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
   FreeTemp(t_reg);
   StoreValueWide(rl_dest, rl_result);
 }
@@ -471,7 +471,7 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
   }
 
   /* null object? */
-  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
 
   int reg_ptr = AllocTemp();
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -479,28 +479,28 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
   if (needs_range_check) {
     reg_len = AllocTemp();
     /* Get len */
-    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+    LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
   }
   /* reg_ptr -> array data */
-  OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
-  FreeTemp(rl_array.low_reg);
+  OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+  FreeTemp(rl_array.reg.GetReg());
   if ((size == kLong) || (size == kDouble)) {
     if (scale) {
       int r_new_index = AllocTemp();
-      OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
       OpRegReg(kOpAdd, reg_ptr, r_new_index);
       FreeTemp(r_new_index);
     } else {
-      OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+      OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
     }
-    FreeTemp(rl_index.low_reg);
+    FreeTemp(rl_index.reg.GetReg());
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
-      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
-    LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+    LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
 
     FreeTemp(reg_ptr);
     StoreValueWide(rl_dest, rl_result);
@@ -508,10 +508,10 @@ void MipsMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
     rl_result = EvalLoc(rl_dest, reg_class, true);
 
     if (needs_range_check) {
-      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
-    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
+    LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
 
     FreeTemp(reg_ptr);
     StoreValue(rl_dest, rl_result);
@@ -538,17 +538,17 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
   rl_index = LoadValue(rl_index, kCoreReg);
   int reg_ptr = INVALID_REG;
   bool allocated_reg_ptr_temp = false;
-  if (IsTemp(rl_array.low_reg) && !card_mark) {
-    Clobber(rl_array.low_reg);
-    reg_ptr = rl_array.low_reg;
+  if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
+    Clobber(rl_array.reg.GetReg());
+    reg_ptr = rl_array.reg.GetReg();
   } else {
     reg_ptr = AllocTemp();
-    OpRegCopy(reg_ptr, rl_array.low_reg);
+    OpRegCopy(reg_ptr, rl_array.reg.GetReg());
    allocated_reg_ptr_temp = true;
   }
 
   /* null object? */
-  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
 
   bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
   int reg_len = INVALID_REG;
@@ -556,7 +556,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
     reg_len = AllocTemp();  // NOTE: max live temps(4) here.
     /* Get len */
-    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
+    LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
   }
   /* reg_ptr -> array data */
   OpRegImm(kOpAdd, reg_ptr, data_offset);
@@ -565,34 +565,34 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
     // TUNING: specific wide routine that can handle fp regs
     if (scale) {
       int r_new_index = AllocTemp();
-      OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
+      OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
       OpRegReg(kOpAdd, reg_ptr, r_new_index);
       FreeTemp(r_new_index);
     } else {
-      OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
+      OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
     }
     rl_src = LoadValueWide(rl_src, reg_class);
     if (needs_range_check) {
-      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
-    StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
+    StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
   } else {
     rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
-      GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
+      GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
       FreeTemp(reg_len);
     }
-    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
+    StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
                      scale, size);
   }
   if (allocated_reg_ptr_temp) {
     FreeTemp(reg_ptr);
   }
   if (card_mark) {
-    MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+    MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
   }
 }
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 00eef96cb5..59f442c61a 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -141,16 +141,6 @@ namespace art {
 #define rMIPS_LR INVALID_REG
 #define rMIPS_PC INVALID_REG
 
-// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
-#define MIPS_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_V0, INVALID_REG, \
-                           INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
-                                 INVALID_REG, INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_RESULT0, \
-                                r_RESULT1, INVALID_SREG, INVALID_SREG}
-#define MIPS_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
-                                  r_FRESULT1, INVALID_SREG, INVALID_SREG}
-
 enum MipsResourceEncodingPos {
   kMipsGPReg0 = 0,
   kMipsRegSP = 29,
@@ -279,6 +269,20 @@ enum MipsNativeRegisterPool {
 #define rMIPS_INVOKE_TGT r_T9
 #define rMIPS_COUNT INVALID_REG
 
+// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
+const RegLocation mips_loc_c_return
+    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+     RegStorage(RegStorage::k32BitSolo, r_V0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_wide
+    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+     RegStorage(RegStorage::k64BitPair, r_V0, r_V1), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_float
+    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+     RegStorage(RegStorage::k32BitSolo, r_F0), INVALID_SREG, INVALID_SREG};
+const RegLocation mips_loc_c_return_double
+    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
+     RegStorage(RegStorage::k64BitPair, r_F0, r_F1), INVALID_SREG, INVALID_SREG};
+
 enum MipsShiftEncodings {
   kMipsLsl = 0x0,
   kMipsLsr = 0x1,
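Note: the RegStorage constructors and accessors used throughout this diff (RegStorage(RegStorage::k32BitSolo, reg), RegStorage(RegStorage::k64BitPair, lo, hi), GetReg(), GetHighReg(), SetReg(), SetHighReg()) behave roughly as in the sketch below. This is an illustrative mock, not the actual dex/reg_storage.h; the real class packs the same information into a 16-bit encoding rather than the fields shown here.

    // Illustrative mock of the RegStorage surface exercised in this patch; not ART code.
    class RegStorage {
     public:
      enum Kind { k32BitSolo, k64BitPair };  // assumed subset of the real storage kinds

      // Single 32-bit register.
      RegStorage(Kind kind, int reg) : kind_(kind), low_(reg), high_(-1) {}
      // 64-bit value held in a low/high register pair.
      RegStorage(Kind kind, int low_reg, int high_reg)
          : kind_(kind), low_(low_reg), high_(high_reg) {}

      int GetReg() const { return low_; }       // low (or only) register
      int GetHighReg() const { return high_; }  // high register of a pair
      void SetReg(int reg) { low_ = reg; }
      void SetHighReg(int reg) { high_ = reg; }

     private:
      Kind kind_;
      int low_;
      int high_;
    };

    // Usage mirroring the return-value templates above (2/3 stand in for r_V0/r_V1):
    //   RegStorage solo(RegStorage::k32BitSolo, 2);     // "r_V0"
    //   RegStorage pair(RegStorage::k64BitPair, 2, 3);  // "r_V0/r_V1"

diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 224e8f21f2..85c250da0f 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -40,23 +40,19 @@ static int fp_temps[] = {r_F0, r_F1, r_F2, r_F3, r_F4, r_F5, r_F6, r_F7,
                          r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
 
 RegLocation MipsMir2Lir::LocCReturn() {
-  RegLocation res = MIPS_LOC_C_RETURN;
-  return res;
+  return mips_loc_c_return;
 }
 
 RegLocation MipsMir2Lir::LocCReturnWide() {
-  RegLocation res = MIPS_LOC_C_RETURN_WIDE;
-  return res;
+  return mips_loc_c_return_wide;
 }
 
 RegLocation MipsMir2Lir::LocCReturnFloat() {
-  RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
-  return res;
+  return mips_loc_c_return_float;
 }
 
 RegLocation MipsMir2Lir::LocCReturnDouble() {
-  RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
-  return res;
+  return mips_loc_c_return_double;
 }
 
 // Return a target-dependent special register.
@@ -441,27 +437,20 @@ void MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
 #endif
 }
 
-/*
- * Alloc a pair of core registers, or a double.  Low reg in low byte,
- * high reg in next byte.
- */
-int MipsMir2Lir::AllocTypedTempPair(bool fp_hint,
-                                    int reg_class) {
+// Alloc a pair of core registers, or a double.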
+RegStorage MipsMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
   int high_reg;
   int low_reg;
-  int res = 0;
 
   if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
     low_reg = AllocTempDouble();
     high_reg = low_reg + 1;
-    res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
-    return res;
+    return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
   }
 
   low_reg = AllocTemp();
   high_reg = AllocTemp();
-  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
-  return res;
+  return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
 }
 
 int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -505,11 +494,11 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
 }
 
 void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
-  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
-      (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+  if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
+      (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
     // No overlap, free both
-    FreeTemp(rl_free.low_reg);
-    FreeTemp(rl_free.high_reg);
+    FreeTemp(rl_free.reg.GetReg());
+    FreeTemp(rl_free.reg.GetHighReg());
   }
 }
 /*
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 8c2ed3667b..03d3634ccb 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -53,8 +53,9 @@ int Mir2Lir::LoadArg(int in_position, bool wide) {
   if (wide && reg_arg_high == INVALID_REG) {
     // If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
     if (reg_arg_low == INVALID_REG) {
-      int new_regs = AllocTypedTempPair(false, kAnyReg);
-      DECODE_REG_PAIR(new_regs, reg_arg_low, reg_arg_high);
+      RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
+      reg_arg_low = new_regs.GetReg();
+      reg_arg_high = new_regs.GetHighReg();
       LoadBaseDispWide(TargetReg(kSp), offset, reg_arg_low, reg_arg_high, INVALID_SREG);
     } else {
       reg_arg_high = AllocTemp();
@@ -70,6 +71,7 @@ int Mir2Lir::LoadArg(int in_position, bool wide) {
   }
 
   if (wide) {
+    // TODO: replace w/ RegStorage.
     return ENCODE_REG_PAIR(reg_arg_low, reg_arg_high);
   } else {
     return reg_arg_low;
   }
@@ -90,25 +92,25 @@ void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
   if (!rl_dest.wide) {
     int reg = GetArgMappingToPhysicalReg(in_position);
     if (reg != INVALID_REG) {
-      OpRegCopy(rl_dest.low_reg, reg);
+      OpRegCopy(rl_dest.reg.GetReg(), reg);
     } else {
-      LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
+      LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
     }
   } else {
     int reg_arg_low = GetArgMappingToPhysicalReg(in_position);
     int reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
 
     if (reg_arg_low != INVALID_REG && reg_arg_high != INVALID_REG) {
-      OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, reg_arg_low, reg_arg_high);
+      OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), reg_arg_low, reg_arg_high);
     } else if (reg_arg_low != INVALID_REG && reg_arg_high == INVALID_REG) {
-      OpRegCopy(rl_dest.low_reg, reg_arg_low);
+      OpRegCopy(rl_dest.reg.GetReg(), reg_arg_low);
       int offset_high = offset + sizeof(uint32_t);
-      LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.high_reg);
+      LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHighReg());
     } else if (reg_arg_low == INVALID_REG && reg_arg_high != INVALID_REG) {
-      OpRegCopy(rl_dest.high_reg, reg_arg_high);
-      LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
+      OpRegCopy(rl_dest.reg.GetHighReg(), reg_arg_high);
+      LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
    } else {
-      LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
+      LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
     }
   }
 }
@@ -131,9 +133,9 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
   RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
   int reg_obj = LoadArg(data.object_arg);
   if (wide) {
-    LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
+    LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
   } else {
-    LoadBaseDisp(reg_obj, data.field_offset, rl_dest.low_reg, kWord, INVALID_SREG);
+    LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg.GetReg(), kWord, INVALID_SREG);
   }
   if (data.is_volatile) {
     GenMemBarrier(kLoadLoad);
@@ -210,7 +212,7 @@ bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& speci
       successful = true;
       RegLocation rl_dest = GetReturn(cu_->shorty[0] == 'F');
       GenPrintLabel(mir);
-      LoadConstant(rl_dest.low_reg, static_cast<int>(special.d.data));
+      LoadConstant(rl_dest.reg.GetReg(), static_cast<int>(special.d.data));
       return_mir = mir_graph_->GetNextUnconditionalMir(bb, mir);
       break;
     }
@@ -371,19 +373,19 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
     case Instruction::CONST_4:
     case Instruction::CONST_16:
       rl_result = EvalLoc(rl_dest, kAnyReg, true);
-      LoadConstantNoClobber(rl_result.low_reg, vB);
+      LoadConstantNoClobber(rl_result.reg.GetReg(), vB);
       StoreValue(rl_dest, rl_result);
       if (vB == 0) {
-        Workaround7250540(rl_dest, rl_result.low_reg);
+        Workaround7250540(rl_dest, rl_result.reg.GetReg());
       }
       break;
 
     case Instruction::CONST_HIGH16:
       rl_result = EvalLoc(rl_dest, kAnyReg, true);
-      LoadConstantNoClobber(rl_result.low_reg, vB << 16);
+      LoadConstantNoClobber(rl_result.reg.GetReg(), vB << 16);
       StoreValue(rl_dest, rl_result);
       if (vB == 0) {
-        Workaround7250540(rl_dest, rl_result.low_reg);
+        Workaround7250540(rl_dest, rl_result.reg.GetReg());
       }
       break;
@@ -398,7 +400,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
 
     case Instruction::CONST_WIDE_HIGH16:
       rl_result = EvalLoc(rl_dest, kAnyReg, true);
-      LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
+      LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
                        static_cast<int64_t>(vB) << 48);
       StoreValueWide(rl_dest, rl_result);
       break;
@@ -431,9 +433,9 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       int len_offset;
       len_offset = mirror::Array::LengthOffset().Int32Value();
       rl_src[0] = LoadValue(rl_src[0], kCoreReg);
-      GenNullCheck(rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
+      GenNullCheck(rl_src[0].s_reg_low, rl_src[0].reg.GetReg(), opt_flags);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
-      LoadWordDisp(rl_src[0].low_reg, len_offset, rl_result.low_reg);
+      LoadWordDisp(rl_src[0].reg.GetReg(), len_offset, rl_result.reg.GetReg());
       StoreValue(rl_dest, rl_result);
       break;
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 6e97c531cd..7522cd9499 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -21,6 +21,7 @@
 #include "compiled_method.h"
 #include "dex/compiler_enums.h"
 #include "dex/compiler_ir.h"
+#include "dex/reg_storage.h"
 #include "dex/backend.h"
 #include "driver/compiler_driver.h"
 #include "leb128_encoder.h"
@@ -813,7 +814,7 @@ class Mir2Lir : public Backend {
     virtual bool IsFpReg(int reg) = 0;
     virtual bool SameRegType(int reg1, int reg2) = 0;
     virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
-    virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
+    virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class) = 0;
     virtual int S2d(int low_reg, int high_reg) = 0;
     virtual int TargetReg(SpecialTargetRegister reg) = 0;
     virtual int GetArgMappingToPhysicalReg(int arg_num) = 0;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 0a651713ab..3a8942e46e 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -460,7 +460,7 @@ void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) {
   DCHECK(!rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  RegisterInfo* p = GetRegInfo(rl.low_reg);
+  RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
   p->def_start = start->next;
   p->def_end = finish;
 }
@@ -474,8 +474,8 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
   DCHECK(rl.wide);
   DCHECK(start && start->next);
   DCHECK(finish);
-  RegisterInfo* p = GetRegInfo(rl.low_reg);
-  ResetDef(rl.high_reg);  // Only track low of pair
+  RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
+  ResetDef(rl.reg.GetHighReg());  // Only track low of pair
   p->def_start = start->next;
   p->def_end = finish;
 }
@@ -483,8 +483,8 @@ void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
 RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
   DCHECK(rl.wide);
   if (rl.location == kLocPhysReg) {
-    RegisterInfo* info_lo = GetRegInfo(rl.low_reg);
-    RegisterInfo* info_hi = GetRegInfo(rl.high_reg);
+    RegisterInfo* info_lo = GetRegInfo(rl.reg.GetReg());
+    RegisterInfo* info_hi = GetRegInfo(rl.reg.GetHighReg());
     if (info_lo->is_temp) {
       info_lo->pair = false;
       info_lo->def_start = NULL;
@@ -502,18 +502,18 @@ RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
 
 void Mir2Lir::ResetDefLoc(RegLocation rl) {
   DCHECK(!rl.wide);
-  RegisterInfo* p = IsTemp(rl.low_reg);
+  RegisterInfo* p = IsTemp(rl.reg.GetReg());
   if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) {
     DCHECK(!p->pair);
     NullifyRange(p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
   }
-  ResetDef(rl.low_reg);
+  ResetDef(rl.reg.GetReg());
 }
 
 void Mir2Lir::ResetDefLocWide(RegLocation rl) {
   DCHECK(rl.wide);
-  RegisterInfo* p_low = IsTemp(rl.low_reg);
-  RegisterInfo* p_high = IsTemp(rl.high_reg);
+  RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
+  RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
   if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
     DCHECK(p_low->pair);
     NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -521,8 +521,8 @@ void Mir2Lir::ResetDefLocWide(RegLocation rl) {
   if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
     DCHECK(p_high->pair);
   }
-  ResetDef(rl.low_reg);
-  ResetDef(rl.high_reg);
+  ResetDef(rl.reg.GetReg());
+  ResetDef(rl.reg.GetHighReg());
 }
 
 void Mir2Lir::ResetDefTracking() {
@@ -621,10 +621,10 @@ void Mir2Lir::MarkPair(int low_reg, int high_reg) {
 }
 
 void Mir2Lir::MarkClean(RegLocation loc) {
-  RegisterInfo* info = GetRegInfo(loc.low_reg);
+  RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
   info->dirty = false;
   if (loc.wide) {
-    info = GetRegInfo(loc.high_reg);
+    info = GetRegInfo(loc.reg.GetHighReg());
     info->dirty = false;
   }
 }
@@ -634,10 +634,10 @@ void Mir2Lir::MarkDirty(RegLocation loc) {
     // If already home, can't be dirty
     return;
   }
-  RegisterInfo* info = GetRegInfo(loc.low_reg);
+  RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
  info->dirty = true;
   if (loc.wide) {
-    info = GetRegInfo(loc.high_reg);
+    info = GetRegInfo(loc.reg.GetHighReg());
     info->dirty = true;
   }
 }
@@ -707,7 +707,7 @@ RegLocation Mir2Lir::UpdateLoc(RegLocation loc) {
         Clobber(info_lo->partner);
         FreeTemp(info_lo->reg);
       } else {
-        loc.low_reg = info_lo->reg;
+        loc.reg = RegStorage(RegStorage::k32BitSolo, info_lo->reg);
         loc.location = kLocPhysReg;
       }
     }
@@ -744,11 +744,10 @@ RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) {
     }
     if (match) {
       // Can reuse - update the register usage info
-      loc.low_reg = info_lo->reg;
-      loc.high_reg = info_hi->reg;
       loc.location = kLocPhysReg;
-      MarkPair(loc.low_reg, loc.high_reg);
-      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+      loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
+      MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+      DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
       return loc;
     }
     // Can't easily reuse - clobber and free any overlaps
@@ -779,7 +778,6 @@ RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) {
 
 RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
   DCHECK(loc.wide);
-  int32_t new_regs;
   int32_t low_reg;
   int32_t high_reg;
@@ -787,22 +785,21 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
 
   /* If already in registers, we can assume proper form.  Right reg class? */
   if (loc.location == kLocPhysReg) {
-    DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
-    DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
-    if (!RegClassMatches(reg_class, loc.low_reg)) {
+    DCHECK_EQ(IsFpReg(loc.reg.GetReg()), IsFpReg(loc.reg.GetHighReg()));
+    DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+    if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
       /* Wrong register class.  Reallocate and copy */
-      new_regs = AllocTypedTempPair(loc.fp, reg_class);
-      low_reg = new_regs & 0xff;
-      high_reg = (new_regs >> 8) & 0xff;
-      OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
-      CopyRegInfo(low_reg, loc.low_reg);
-      CopyRegInfo(high_reg, loc.high_reg);
-      Clobber(loc.low_reg);
-      Clobber(loc.high_reg);
-      loc.low_reg = low_reg;
-      loc.high_reg = high_reg;
-      MarkPair(loc.low_reg, loc.high_reg);
-      DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+      RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
+      low_reg = new_regs.GetReg();
+      high_reg = new_regs.GetHighReg();
+      OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
+      CopyRegInfo(low_reg, loc.reg.GetReg());
+      CopyRegInfo(high_reg, loc.reg.GetHighReg());
+      Clobber(loc.reg.GetReg());
+      Clobber(loc.reg.GetHighReg());
+      loc.reg = new_regs;
+      MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+      DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
     }
     return loc;
   }
@@ -810,20 +807,18 @@ RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
   DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
 
-  new_regs = AllocTypedTempPair(loc.fp, reg_class);
-  loc.low_reg = new_regs & 0xff;
-  loc.high_reg = (new_regs >> 8) & 0xff;
+  loc.reg = AllocTypedTempWide(loc.fp, reg_class);
 
-  MarkPair(loc.low_reg, loc.high_reg);
+  MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(loc.low_reg, loc.s_reg_low);
+    MarkLive(loc.reg.GetReg(), loc.s_reg_low);
     // Does this wide value live in two registers or one vector register?
-    if (loc.low_reg != loc.high_reg) {
-      MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
+    if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
+      MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
     }
   }
-  DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+  DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
   return loc;
 }
@@ -836,13 +831,13 @@ RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
 
   loc = UpdateLoc(loc);
   if (loc.location == kLocPhysReg) {
-    if (!RegClassMatches(reg_class, loc.low_reg)) {
+    if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
      /* Wrong register class.  Realloc, copy and transfer ownership */
       new_reg = AllocTypedTemp(loc.fp, reg_class);
-      OpRegCopy(new_reg, loc.low_reg);
-      CopyRegInfo(new_reg, loc.low_reg);
-      Clobber(loc.low_reg);
-      loc.low_reg = new_reg;
+      OpRegCopy(new_reg, loc.reg.GetReg());
+      CopyRegInfo(new_reg, loc.reg.GetReg());
+      Clobber(loc.reg.GetReg());
+      loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
     }
     return loc;
   }
@@ -850,11 +845,11 @@ RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
   DCHECK_NE(loc.s_reg_low, INVALID_SREG);
 
   new_reg = AllocTypedTemp(loc.fp, reg_class);
-  loc.low_reg = new_reg;
+  loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
 
   if (update) {
     loc.location = kLocPhysReg;
-    MarkLive(loc.low_reg, loc.s_reg_low);
+    MarkLive(loc.reg.GetReg(), loc.s_reg_low);
   }
   return loc;
 }
@@ -1006,32 +1001,29 @@ void Mir2Lir::DoPromotion() {
       if (curr->fp) {
         if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
           curr->location = kLocPhysReg;
-          curr->low_reg = promotion_map_[p_map_idx].FpReg;
+          curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].FpReg);
           curr->home = true;
         }
       } else {
         if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
           curr->location = kLocPhysReg;
-          curr->low_reg = promotion_map_[p_map_idx].core_reg;
+          curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].core_reg);
           curr->home = true;
         }
       }
-      curr->high_reg = INVALID_REG;
     } else {
       if (curr->high_word) {
         continue;
       }
       if (curr->fp) {
         if ((promotion_map_[p_map_idx].fp_location == kLocPhysReg) &&
-            (promotion_map_[p_map_idx+1].fp_location ==
-            kLocPhysReg)) {
+            (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg)) {
           int low_reg = promotion_map_[p_map_idx].FpReg;
           int high_reg = promotion_map_[p_map_idx+1].FpReg;
           // Doubles require pair of singles starting at even reg
           if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
             curr->location = kLocPhysReg;
-            curr->low_reg = low_reg;
-            curr->high_reg = high_reg;
+            curr->reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
             curr->home = true;
           }
         }
@@ -1040,8 +1032,8 @@ void Mir2Lir::DoPromotion() {
             && (promotion_map_[p_map_idx+1].core_location ==
                 kLocPhysReg)) {
           curr->location = kLocPhysReg;
-          curr->low_reg = promotion_map_[p_map_idx].core_reg;
-          curr->high_reg = promotion_map_[p_map_idx+1].core_reg;
+          curr->reg = RegStorage(RegStorage::k64BitPair, promotion_map_[p_map_idx].core_reg,
+                                 promotion_map_[p_map_idx+1].core_reg);
           curr->home = true;
         }
       }
@@ -1068,13 +1060,13 @@ RegLocation Mir2Lir::GetReturnWide(bool is_double) {
   RegLocation gpr_res = LocCReturnWide();
   RegLocation fpr_res = LocCReturnDouble();
   RegLocation res = is_double ? fpr_res : gpr_res;
-  Clobber(res.low_reg);
-  Clobber(res.high_reg);
-  LockTemp(res.low_reg);
-  LockTemp(res.high_reg);
+  Clobber(res.reg.GetReg());
+  Clobber(res.reg.GetHighReg());
+  LockTemp(res.reg.GetReg());
+  LockTemp(res.reg.GetHighReg());
   // Does this wide value live in two registers or one vector register?
-  if (res.low_reg != res.high_reg) {
-    MarkPair(res.low_reg, res.high_reg);
+  if (res.reg.GetReg() != res.reg.GetHighReg()) {
+    MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
   }
   return res;
 }
@@ -1083,11 +1075,11 @@ RegLocation Mir2Lir::GetReturn(bool is_float) {
   RegLocation gpr_res = LocCReturn();
   RegLocation fpr_res = LocCReturnFloat();
   RegLocation res = is_float ? fpr_res : gpr_res;
-  Clobber(res.low_reg);
+  Clobber(res.reg.GetReg());
   if (cu_->instruction_set == kMips) {
-    MarkInUse(res.low_reg);
+    MarkInUse(res.reg.GetReg());
   } else {
-    LockTemp(res.low_reg);
+    LockTemp(res.reg.GetReg());
   }
   return res;
 }
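The ralloc_util.cc hunks above also retire the old convention in which AllocTypedTempPair() packed a register pair into a single int (low reg in the low byte, high reg in the next byte) and callers such as EvalLocWide() unpacked it with masks and shifts. A minimal standalone sketch of that packing versus the RegStorage-style accessors, built only from expressions that appear in the removed and added lines (the register numbers are made up for illustration):

    // Standalone illustration; not ART code, register numbers are arbitrary.
    #include <cassert>

    int main() {
      int low_reg = 4, high_reg = 5;

      // Old style: pair packed into one int, as the removed AllocTypedTempPair did.
      int packed = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
      assert((packed & 0xff) == low_reg);          // what EvalLocWide used to unpack
      assert(((packed >> 8) & 0xff) == high_reg);

      // New style: the pair travels as one RegStorage value and callers ask for
      // GetReg()/GetHighReg() instead of masking, e.g.
      //   RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
      //   low_reg = new_regs.GetReg();
      //   high_reg = new_regs.GetHighReg();
      return 0;
    }

diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 0613cdff7a..3708f01120 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -40,7 +40,7 @@ void X86Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
     int key = keys[i];
     BasicBlock* case_block =
         mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
-    OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
+    OpCmpImmBranch(kCondEq, rl_src.reg.GetReg(), key,
                    &block_label_list_[case_block->id]);
   }
 }
@@ -87,7 +87,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
     // We can use the saved value.
     RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
     rl_method = LoadValue(rl_method, kCoreReg);
-    start_of_method_reg = rl_method.low_reg;
+    start_of_method_reg = rl_method.reg.GetReg();
     store_method_addr_used_ = true;
   } else {
     start_of_method_reg = AllocTemp();
@@ -97,10 +97,10 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
   int keyReg;
   // Remove the bias, if necessary
   if (low_key == 0) {
-    keyReg = rl_src.low_reg;
+    keyReg = rl_src.reg.GetReg();
   } else {
     keyReg = AllocTemp();
-    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
+    OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
   }
   // Bounds check - if < 0 or >= size continue following switch
   OpRegImm(kOpCmp, keyReg, size-1);
@@ -164,7 +164,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
 void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
   int ex_offset = Thread::ExceptionOffset().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
+  NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
   NewLIR2(kX86Mov32TI, ex_offset, 0);
   StoreValue(rl_dest, rl_result);
 }
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 421d51e4fd..55f18ef341 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -48,8 +48,9 @@ class X86Mir2Lir : public Mir2Lir {
     // Required for target - register utilities.
     bool IsFpReg(int reg);
     bool SameRegType(int reg1, int reg2);
+    // TODO: for consistency, make this return a RegStorage as well?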
    int AllocTypedTemp(bool fp_hint, int reg_class);
-    int AllocTypedTempPair(bool fp_hint, int reg_class);
+    RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
     int S2d(int low_reg, int high_reg);
     int TargetReg(SpecialTargetRegister reg);
     int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 4c2ecc0efd..1827901b1b 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -63,9 +63,9 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
   rl_src1 = LoadValue(rl_src1, kFPReg);
   rl_src2 = LoadValue(rl_src2, kFPReg);
   rl_result = EvalLoc(rl_dest, kFPReg, true);
-  int r_dest = rl_result.low_reg;
-  int r_src1 = rl_src1.low_reg;
-  int r_src2 = rl_src2.low_reg;
+  int r_dest = rl_result.reg.GetReg();
+  int r_src1 = rl_src1.reg.GetReg();
+  int r_src2 = rl_src2.reg.GetReg();
   if (r_dest == r_src2) {
     r_src2 = AllocTempFloat();
     OpRegCopy(r_src2, r_dest);
@@ -118,9 +118,9 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
   rl_result = EvalLoc(rl_dest, kFPReg, true);
   DCHECK(rl_dest.wide);
   DCHECK(rl_result.wide);
-  int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
-  int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
-  int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+  int r_dest = S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg());
+  int r_src1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+  int r_src2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
   if (r_dest == r_src2) {
     r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
     OpRegCopy(r_src2, r_dest);
@@ -140,7 +140,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
 
   // If the source is in physical register, then put it in its location on stack.
   if (rl_src.location == kLocPhysReg) {
-    RegisterInfo* lo_info = GetRegInfo(rl_src.low_reg);
+    RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetReg());
 
     if (lo_info != nullptr && lo_info->is_temp) {
       // Calling FlushSpecificReg because it will only write back VR if it is dirty.
@@ -148,7 +148,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
     } else {
       // It must have been register promoted if it is not a temp but is still in physical
      // register. Since we need it to be in memory to convert, we place it there now.
-      StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.low_reg, rl_src.high_reg);
+      StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
     }
   }
@@ -181,13 +181,13 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
   if (is_double) {
     rl_result = EvalLocWide(rl_dest, kFPReg, true);
 
-    LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+    LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
 
     StoreValueWide(rl_dest, rl_result);
   } else {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
 
-    LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg);
+    LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg());
 
     StoreValue(rl_dest, rl_result);
   }
@@ -219,21 +219,21 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
       break;
     case Instruction::FLOAT_TO_INT: {
       rl_src = LoadValue(rl_src, kFPReg);
-      src_reg = rl_src.low_reg;
+      src_reg = rl_src.reg.GetReg();
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
       ClobberSReg(rl_dest.s_reg_low);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       int temp_reg = AllocTempFloat();
 
-      LoadConstant(rl_result.low_reg, 0x7fffffff);
-      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+      LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.reg.GetReg());
       NewLIR2(kX86ComissRR, src_reg, temp_reg);
       LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
       LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+      NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), src_reg);
       LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
       branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
       branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
       branch_normal->target = NewLIR0(kPseudoTargetLabel);
       StoreValue(rl_dest, rl_result);
@@ -241,21 +241,21 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
     }
     case Instruction::DOUBLE_TO_INT: {
       rl_src = LoadValueWide(rl_src, kFPReg);
-      src_reg = rl_src.low_reg;
+      src_reg = rl_src.reg.GetReg();
       // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
       ClobberSReg(rl_dest.s_reg_low);
       rl_result = EvalLoc(rl_dest, kCoreReg, true);
       int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
 
-      LoadConstant(rl_result.low_reg, 0x7fffffff);
-      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+      LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
+      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.reg.GetReg());
       NewLIR2(kX86ComisdRR, src_reg, temp_reg);
       LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
       LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
-      NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+      NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), src_reg);
       LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
       branch_na_n->target = NewLIR0(kPseudoTargetLabel);
-      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
       branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
       branch_normal->target = NewLIR0(kPseudoTargetLabel);
       StoreValue(rl_dest, rl_result);
@@ -278,18 +278,18 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
   }
   if (rl_src.wide) {
     rl_src = LoadValueWide(rl_src, rcSrc);
-    src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+    src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
   } else {
     rl_src = LoadValue(rl_src, rcSrc);
-    src_reg = rl_src.low_reg;
+    src_reg = rl_src.reg.GetReg();
   }
   if (rl_dest.wide) {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
     StoreValueWide(rl_dest, rl_result);
   } else {
     rl_result = EvalLoc(rl_dest, kFPReg, true);
-    NewLIR2(op, rl_result.low_reg, src_reg);
+    NewLIR2(op, rl_result.reg.GetReg(), src_reg);
     StoreValue(rl_dest, rl_result);
   }
 }
@@ -302,19 +302,19 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
   int src_reg2;
   if (single) {
     rl_src1 = LoadValue(rl_src1, kFPReg);
-    src_reg1 = rl_src1.low_reg;
+    src_reg1 = rl_src1.reg.GetReg();
     rl_src2 = LoadValue(rl_src2, kFPReg);
-    src_reg2 = rl_src2.low_reg;
+    src_reg2 = rl_src2.reg.GetReg();
   } else {
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
-    src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+    src_reg1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+    src_reg2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
   }
   // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
   ClobberSReg(rl_dest.s_reg_low);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
+  LoadConstantNoClobber(rl_result.reg.GetReg(), unordered_gt ? 1 : 0);
   if (single) {
     NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
   } else {
@@ -325,20 +325,20 @@ void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
     branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
   }
   // If the result reg can't be byte accessed, use a jump and move instead of a set.
-  if (rl_result.low_reg >= 4) {
+  if (rl_result.reg.GetReg() >= 4) {
     LIR* branch2 = NULL;
     if (unordered_gt) {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
-      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
+      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
     } else {
       branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
-      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
+      NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x1);
     }
     branch2->target = NewLIR0(kPseudoTargetLabel);
   } else {
-    NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
+    NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondA /* above - unsigned > */);
   }
-  NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
+  NewLIR2(kX86Sbb32RI, rl_result.reg.GetReg(), 0);
   if (unordered_gt) {
     branch->target = NewLIR0(kPseudoTargetLabel);
   }
@@ -357,14 +357,14 @@ void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
     rl_src2 = mir_graph_->GetSrcWide(mir, 2);
     rl_src1 = LoadValueWide(rl_src1, kFPReg);
     rl_src2 = LoadValueWide(rl_src2, kFPReg);
-    NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
-            S2d(rl_src2.low_reg, rl_src2.high_reg));
+    NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
+            S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
   } else {
     rl_src1 = mir_graph_->GetSrc(mir, 0);
     rl_src2 = mir_graph_->GetSrc(mir, 1);
     rl_src1 = LoadValue(rl_src1, kFPReg);
     rl_src2 = LoadValue(rl_src2, kFPReg);
-    NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
+    NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
   }
   ConditionCode ccode = mir->meta.ccode;
   switch (ccode) {
@@ -418,7 +418,7 @@ void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
   RegLocation rl_result;
   rl_src = LoadValue(rl_src, kCoreReg);
   rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+  OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
   StoreValue(rl_dest, rl_result);
 }
@@ -426,8 +426,8 @@ void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
   RegLocation rl_result;
   rl_src = LoadValueWide(rl_src, kCoreReg);
   rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
-  OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
+  OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
   StoreValueWide(rl_dest, rl_result);
 }
@@ -436,8 +436,8 @@ bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
   RegLocation rl_dest = InlineTargetWide(info);  // double place for result
   rl_src = LoadValueWide(rl_src, kFPReg);
   RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
-  NewLIR2(kX86SqrtsdRR, S2d(rl_result.low_reg, rl_result.high_reg),
-          S2d(rl_src.low_reg, rl_src.high_reg));
+  NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
+          S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 5f04b7d152..362ab2e57e 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -216,21 +216,21 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
      *     mov t1, $false_case
      *     cmovnz result_reg, t1
      */
-    const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.low_reg == rl_result.low_reg);
+    const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
     const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
     const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
     const bool catch_all_case = !(true_zero_case || false_zero_case);
 
     if (true_zero_case || false_zero_case) {
-      OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
+      OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
     }
 
     if (true_zero_case || false_zero_case || catch_all_case) {
-      OpRegImm(kOpCmp, rl_src.low_reg, 0);
+      OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
     }
 
     if (catch_all_case) {
-      OpRegImm(kOpMov, rl_result.low_reg, true_val);
+      OpRegImm(kOpMov, rl_result.reg.GetReg(), true_val);
     }
 
     if (true_zero_case || false_zero_case || catch_all_case) {
@@ -239,7 +239,7 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
       OpRegImm(kOpMov, temp1_reg, immediateForTemp);
 
       ConditionCode cc = false_zero_case ? kCondEq : kCondNe;
-      OpCondRegReg(kOpCmov, cc, rl_result.low_reg, temp1_reg);
+      OpCondRegReg(kOpCmov, cc, rl_result.reg.GetReg(), temp1_reg);
 
       FreeTemp(temp1_reg);
     }
@@ -264,15 +264,15 @@ void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
      */
 
     // kMirOpSelect is generated just for conditional cases when comparison is done with zero.
-    OpRegImm(kOpCmp, rl_src.low_reg, 0);
+    OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
 
-    if (rl_result.low_reg == rl_true.low_reg) {
-      OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
-    } else if (rl_result.low_reg == rl_false.low_reg) {
-      OpCondRegReg(kOpCmov, kCondEq, rl_result.low_reg, rl_true.low_reg);
+    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
+      OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
+    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
+      OpCondRegReg(kOpCmov, kCondEq, rl_result.reg.GetReg(), rl_true.reg.GetReg());
     } else {
-      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
-      OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
+      OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
+      OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
     }
   }
@@ -337,8 +337,8 @@ void X86Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
   LIR* taken = &block_label_list_[bb->taken];
   LIR* not_taken = &block_label_list_[bb->fall_through];
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  int32_t low_reg = rl_src1.low_reg;
-  int32_t high_reg = rl_src1.high_reg;
+  int32_t low_reg = rl_src1.reg.GetReg();
+  int32_t high_reg = rl_src1.reg.GetHighReg();
 
   if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
     int t_reg = AllocTemp();
@@ -461,7 +461,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
 
   // Assume that the result will be in EDX.
   RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-                           r2, INVALID_REG, INVALID_SREG, INVALID_SREG};
+                           RegStorage(RegStorage::k32BitSolo, r2), INVALID_SREG, INVALID_SREG};
 
   // handle div/rem by 1 special case.
   if (imm == 1) {
@@ -472,7 +472,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
       // x % 1 == 0.
       LoadConstantNoClobber(r0, 0);
       // For this case, return the result in EAX.
-      rl_result.low_reg = r0;
+      rl_result.reg.SetReg(r0);
     }
   } else if (imm == -1) {  // handle 0x80000000 / -1 special case.
     if (is_div) {
@@ -494,7 +494,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
       LoadConstantNoClobber(r0, 0);
     }
     // For this case, return the result in EAX.
-    rl_result.low_reg = r0;
+    rl_result.reg.SetReg(r0);
   } else {
     CHECK(imm <= -2 || imm >= 2);
     // Use H.S.Warren's Hacker's Delight Chapter 10 and
@@ -524,8 +524,8 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
     // We will need the value later.
     if (rl_src.location == kLocPhysReg) {
       // We can use it directly.
-      DCHECK(rl_src.low_reg != r0 && rl_src.low_reg != r2);
-      numerator_reg = rl_src.low_reg;
+      DCHECK(rl_src.reg.GetReg() != r0 && rl_src.reg.GetReg() != r2);
+      numerator_reg = rl_src.reg.GetReg();
     } else {
       LoadValueDirectFixed(rl_src, r1);
       numerator_reg = r1;
@@ -582,7 +582,7 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
       NewLIR2(kX86Sub32RR, r0, r2);
 
       // For this case, return the result in EAX.
-      rl_result.low_reg = r0;
+      rl_result.reg.SetReg(r0);
     }
   }
@@ -638,9 +638,9 @@ RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
 
   // Result is in EAX for div and EDX for rem.
   RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
-                           r0, INVALID_REG, INVALID_SREG, INVALID_SREG};
+                           RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
   if (!is_div) {
-    rl_result.low_reg = r2;
+    rl_result.reg.SetReg(r2);
   }
   return rl_result;
 }
@@ -662,22 +662,22 @@ bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
    * The reason is that the first copy will inadvertently clobber the second element with
    * the first one thus yielding the wrong result. Thus we do a swap in that case.
    */
-  if (rl_result.low_reg == rl_src2.low_reg) {
+  if (rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
     std::swap(rl_src1, rl_src2);
   }
 
   // Pick the first integer as min/max.
-  OpRegCopy(rl_result.low_reg, rl_src1.low_reg);
+  OpRegCopy(rl_result.reg.GetReg(), rl_src1.reg.GetReg());
 
   // If the integers are both in the same register, then there is nothing else to do
   // because they are equal and we have already moved one into the result.
-  if (rl_src1.low_reg != rl_src2.low_reg) {
+  if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
     // It is possible we didn't pick correctly so do the actual comparison now.
-    OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+    OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
 
     // Conditionally move the other integer into the destination register.
     ConditionCode condition_code = is_min ? kCondGt : kCondLt;
-    OpCondRegReg(kOpCmov, condition_code, rl_result.low_reg, rl_src2.low_reg);
+    OpCondRegReg(kOpCmov, condition_code, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
   }
 
   StoreValue(rl_dest, rl_result);
@@ -692,12 +692,12 @@ bool X86Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
   if (size == kLong) {
     // Unaligned access is allowed on x86.
-    LoadBaseDispWide(rl_address.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
+    LoadBaseDispWide(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
     // Unaligned access is allowed on x86.
-    LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
+    LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
     StoreValue(rl_dest, rl_result);
   }
   return true;
@@ -711,12 +711,12 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
   if (size == kLong) {
     // Unaligned access is allowed on x86.
     RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
-    StoreBaseDispWide(rl_address.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
+    StoreBaseDispWide(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
   } else {
     DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
     // Unaligned access is allowed on x86.
     RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
-    StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
+    StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
   }
   return true;
 }
@@ -776,13 +776,13 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
     if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
       // Mark card for object assuming new value is stored.
       FreeTemp(r0);  // Temporarily release EAX for MarkGCCard().
-      MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
+      MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
       LockTemp(r0);
     }
 
     RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
     LoadValueDirect(rl_src_expected, r0);
-    NewLIR5(kX86LockCmpxchgAR, rl_object.low_reg, rl_offset.low_reg, 0, 0, rl_new_value.low_reg);
+    NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
 
     FreeTemp(r0);
   }
@@ -790,8 +790,8 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
   // Convert ZF to boolean
   RegLocation rl_dest = InlineTarget(info);  // boolean place for result
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondZ);
-  NewLIR2(kX86Movzx8RR, rl_result.low_reg, rl_result.low_reg);
+  NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondZ);
+  NewLIR2(kX86Movzx8RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
   StoreValue(rl_dest, rl_result);
   return true;
 }
@@ -830,11 +830,11 @@ void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                RegLocation rl_result, int lit,
                                                int first_bit, int second_bit) {
   int t_reg = AllocTemp();
-  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
-  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+  OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
   FreeTemp(t_reg);
   if (first_bit != 0) {
-    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+    OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
   }
 }
@@ -918,8 +918,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
     int64_t val = mir_graph_->ConstantValueWide(rl_src2);
     if (val == 0) {
       RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
-      OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
-      OpRegReg(kOpXor, rl_result.high_reg, rl_result.high_reg);
+      OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+      OpRegReg(kOpXor, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());
       StoreValueWide(rl_dest, rl_result);
       return;
     } else if (val == 1) {
@@ -952,8 +952,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
     // ECX <- 1H * 2L
     // EAX <- 1L * 2H
     if (src1_in_reg) {
-      GenImulRegImm(r1, rl_src1.high_reg, val_lo);
-      GenImulRegImm(r0, rl_src1.low_reg, val_hi);
+      GenImulRegImm(r1, rl_src1.reg.GetHighReg(), val_lo);
+      GenImulRegImm(r0, rl_src1.reg.GetReg(), val_hi);
     } else {
       GenImulMemImm(r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
       GenImulMemImm(r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
@@ -967,7 +967,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
 
     // EDX:EAX <- 2L * 1L (double precision)
     if (src1_in_reg) {
-      NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
+      NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
     } else {
       LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
       AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -978,7 +978,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
     NewLIR2(kX86Add32RR, r2, r1);
 
     // Result is EDX:EAX
-    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
+    RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+                             RegStorage(RegStorage::k64BitPair, r0, r2),
                              INVALID_SREG, INVALID_SREG};
     StoreValueWide(rl_dest, rl_result);
     return;
@@ -1000,7 +1001,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
 
   // ECX <- 1H
   if (src1_in_reg) {
-    NewLIR2(kX86Mov32RR, r1, rl_src1.high_reg);
+    NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
   } else {
     LoadBaseDisp(rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, r1,
                  kWord, GetSRegHi(rl_src1.s_reg_low));
@@ -1010,7 +1011,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
     // Take advantage of the fact that the values are the same.
     // ECX <- ECX * 2L  (1H * 2L)
     if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
+      NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
     } else {
      int displacement = SRegOffset(rl_src2.s_reg_low);
       LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1023,7 +1024,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
   } else {
     // EAX <- 2H
     if (src2_in_reg) {
-      NewLIR2(kX86Mov32RR, r0, rl_src2.high_reg);
+      NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
     } else {
       LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0,
                    kWord, GetSRegHi(rl_src2.s_reg_low));
@@ -1031,7 +1032,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
 
     // EAX <- EAX * 1L  (2H * 1L)
     if (src1_in_reg) {
-      NewLIR2(kX86Imul32RR, r0, rl_src1.low_reg);
+      NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetReg());
     } else {
       int displacement = SRegOffset(rl_src1.s_reg_low);
       LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1041,7 +1042,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
 
     // ECX <- ECX * 2L  (1H * 2L)
     if (src2_in_reg) {
-      NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
+      NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
     } else {
       int displacement = SRegOffset(rl_src2.s_reg_low);
       LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1055,7 +1056,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
 
   // EAX <- 2L
   if (src2_in_reg) {
-    NewLIR2(kX86Mov32RR, r0, rl_src2.low_reg);
+    NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetReg());
   } else {
     LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, r0,
                  kWord, rl_src2.s_reg_low);
@@ -1063,7 +1064,7 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
 
   // EDX:EAX <- 2L * 1L (double precision)
   if (src1_in_reg) {
-    NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
+    NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
   } else {
     int displacement = SRegOffset(rl_src1.s_reg_low);
     LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1075,8 +1076,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
   NewLIR2(kX86Add32RR, r2, r1);
 
   // Result is EDX:EAX
-  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
-                           INVALID_SREG, INVALID_SREG};
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
+                           RegStorage(RegStorage::k64BitPair, r0, r2), INVALID_SREG, INVALID_SREG};
   StoreValueWide(rl_dest, rl_result);
 }
@@ -1086,18 +1087,18 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
   X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
   if (rl_src.location == kLocPhysReg) {
     // Both operands are in registers.
-    if (rl_dest.low_reg == rl_src.high_reg) {
+    if (rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()) {
       // The registers are the same, so we would clobber it before the use.
       int temp_reg = AllocTemp();
-      OpRegCopy(temp_reg, rl_dest.low_reg);
-      rl_src.high_reg = temp_reg;
+      OpRegCopy(temp_reg, rl_dest.reg.GetReg());
+      rl_src.reg.SetHighReg(temp_reg);
     }
-    NewLIR2(x86op, rl_dest.low_reg, rl_src.low_reg);
+    NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
 
     x86op = GetOpcode(op, rl_dest, rl_src, true);
-    NewLIR2(x86op, rl_dest.high_reg, rl_src.high_reg);
-    FreeTemp(rl_src.low_reg);
-    FreeTemp(rl_src.high_reg);
+    NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
+    FreeTemp(rl_src.reg.GetReg());
+    FreeTemp(rl_src.reg.GetHighReg());
     return;
   }
@@ -1107,11 +1108,11 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
   int rBase = TargetReg(kSp);
   int displacement = SRegOffset(rl_src.s_reg_low);
 
-  LIR *lir = NewLIR3(x86op, rl_dest.low_reg, rBase, displacement + LOWORD_OFFSET);
+  LIR *lir = NewLIR3(x86op, rl_dest.reg.GetReg(), rBase, displacement + LOWORD_OFFSET);
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                           true /* is_load */, true /* is64bit */);
   x86op = GetOpcode(op, rl_dest, rl_src, true);
-  lir = NewLIR3(x86op, rl_dest.high_reg, rBase, displacement + HIWORD_OFFSET);
+  lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), rBase, displacement + HIWORD_OFFSET);
   AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                           true /* is_load */, true /* is64bit */);
 }
@@ -1138,15 +1139,15 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
   int rBase = TargetReg(kSp);
   int displacement = SRegOffset(rl_dest.s_reg_low);
 
-  LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.low_reg);
+  LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.reg.GetReg());
   AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
                           false /* is_load */, true /* is64bit */);
   x86op = GetOpcode(op, rl_dest, rl_src, true);
-  lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.high_reg);
+  lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
   AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
                           false /* is_load */, true /* is64bit */);
-  FreeTemp(rl_src.low_reg);
-  FreeTemp(rl_src.high_reg);
+  FreeTemp(rl_src.reg.GetReg());
+  FreeTemp(rl_src.reg.GetHighReg());
 }
 
 void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
@@ -1188,12 +1189,12 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
 
   // Get one of the source operands into temporary register.
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
-  if (IsTemp(rl_src1.low_reg) && IsTemp(rl_src1.high_reg)) {
+  if (IsTemp(rl_src1.reg.GetReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
     GenLongRegOrMemOp(rl_src1, rl_src2, op);
   } else if (is_commutative) {
     rl_src2 = LoadValueWide(rl_src2, kCoreReg);
     // We need at least one of them to be a temporary.
-    if (!(IsTemp(rl_src2.low_reg) && IsTemp(rl_src2.high_reg))) {
+    if (!(IsTemp(rl_src2.reg.GetReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
       rl_src1 = ForceTempWide(rl_src1);
     }
     GenLongRegOrMemOp(rl_src1, rl_src2, op);
@@ -1234,15 +1235,16 @@ void X86Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest,
 void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_result = ForceTempWide(rl_src);
-  if (rl_dest.low_reg == rl_src.high_reg) {
+  if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
+      ((rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()))) {
     // The registers are the same, so we would clobber it before the use.
     int temp_reg = AllocTemp();
-    OpRegCopy(temp_reg, rl_result.low_reg);
-    rl_result.high_reg = temp_reg;
+    OpRegCopy(temp_reg, rl_result.reg.GetReg());
+    rl_result.reg.SetHighReg(temp_reg);
   }
-  OpRegReg(kOpNeg, rl_result.low_reg, rl_result.low_reg);  // rLow = -rLow
-  OpRegImm(kOpAdc, rl_result.high_reg, 0);                 // rHigh = rHigh + CF
-  OpRegReg(kOpNeg, rl_result.high_reg, rl_result.high_reg);  // rHigh = -rHigh
+  OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_result.reg.GetReg());  // rLow = -rLow
+  OpRegImm(kOpAdc, rl_result.reg.GetHighReg(), 0);                   // rHigh = rHigh + CF
+  OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());  // rHigh = -rHigh
   StoreValueWide(rl_dest, rl_result);
 }
@@ -1284,29 +1286,29 @@ void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
     // If index is constant, just fold it into the data offset
     data_offset += constant_index_value << scale;
     // treat as non array below
-    rl_index.low_reg = INVALID_REG;
+    rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
   }
 
   /* null object? */
-  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
 
   if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
     if (constant_index) {
-      GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
+      GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
                        constant_index_value, kThrowConstantArrayBounds);
     } else {
-      GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+      GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
                      len_offset, kThrowArrayBounds);
     }
   }
   rl_result = EvalLoc(rl_dest, reg_class, true);
   if ((size == kLong) || (size == kDouble)) {
-    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_result.low_reg,
-                        rl_result.high_reg, size, INVALID_SREG);
+    LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_result.reg.GetReg(),
+                        rl_result.reg.GetHighReg(), size, INVALID_SREG);
     StoreValueWide(rl_dest, rl_result);
   } else {
-    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
-                        data_offset, rl_result.low_reg, INVALID_REG, size,
+    LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale,
+                        data_offset, rl_result.reg.GetReg(), INVALID_REG, size,
                         INVALID_SREG);
     StoreValue(rl_dest, rl_result);
   }
@@ -1338,18 +1340,18 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
     constant_index_value = mir_graph_->ConstantValue(rl_index);
     data_offset += constant_index_value << scale;
     // treat as non array below
-    rl_index.low_reg = INVALID_REG;
+    rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
   }
 
   /* null object? */
-  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+  GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
 
   if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
     if (constant_index) {
-      GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
+      GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
                        constant_index_value, kThrowConstantArrayBounds);
     } else {
-      GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+      GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
                      len_offset, kThrowArrayBounds);
     }
   }
@@ -1359,21 +1361,21 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
     rl_src = LoadValue(rl_src, reg_class);
   }
   // If the src reg can't be byte accessed, move it to a temp first.
-  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.reg.GetReg() >= 4) {
     int temp = AllocTemp();
-    OpRegCopy(temp, rl_src.low_reg);
-    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+    OpRegCopy(temp, rl_src.reg.GetReg());
+    StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, temp,
                          INVALID_REG, size, INVALID_SREG);
   } else {
-    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
-                         rl_src.high_reg, size, INVALID_SREG);
+    StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_src.reg.GetReg(),
+                         rl_src.wide ? rl_src.reg.GetHighReg() : INVALID_REG, size, INVALID_SREG);
   }
   if (card_mark) {
     // Free rl_index if its a temp. Ensures there are 2 free regs for card mark.
     if (!constant_index) {
-      FreeTemp(rl_index.low_reg);
+      FreeTemp(rl_index.reg.GetReg());
    }
-    MarkGCCard(rl_src.low_reg, rl_array.low_reg);
+    MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
   }
 }
@@ -1385,52 +1387,52 @@ RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation
     case Instruction::SHL_LONG_2ADDR:
       DCHECK_NE(shift_amount, 1);  // Prevent a double store from happening.
       if (shift_amount == 32) {
-        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
-        LoadConstant(rl_result.low_reg, 0);
+        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+        LoadConstant(rl_result.reg.GetReg(), 0);
       } else if (shift_amount > 31) {
-        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
-        FreeTemp(rl_src.high_reg);
-        NewLIR2(kX86Sal32RI, rl_result.high_reg, shift_amount - 32);
-        LoadConstant(rl_result.low_reg, 0);
+        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
+        FreeTemp(rl_src.reg.GetHighReg());
+        NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
+        LoadConstant(rl_result.reg.GetReg(), 0);
       } else {
-        OpRegCopy(rl_result.low_reg, rl_src.low_reg);
-        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
-        NewLIR3(kX86Shld32RRI, rl_result.high_reg, rl_result.low_reg, shift_amount);
-        NewLIR2(kX86Sal32RI, rl_result.low_reg, shift_amount);
+        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+        NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), shift_amount);
+        NewLIR2(kX86Sal32RI, rl_result.reg.GetReg(), shift_amount);
       }
      break;
     case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
-        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
-        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
-        NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
+        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+        NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
      } else if (shift_amount > 31) {
-        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
-        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
-        NewLIR2(kX86Sar32RI, rl_result.low_reg, shift_amount - 32);
-        NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
+        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+        NewLIR2(kX86Sar32RI, rl_result.reg.GetReg(), shift_amount - 32);
+        NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
      } else {
-        OpRegCopy(rl_result.low_reg, rl_src.low_reg);
-        OpRegCopy(rl_result.high_reg, rl_src.high_reg);
-        NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
-        NewLIR2(kX86Sar32RI, rl_result.high_reg, shift_amount);
+        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+        OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+        NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
+        NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
-        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
-        LoadConstant(rl_result.high_reg, 0);
+        OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
+        LoadConstant(rl_result.reg.GetHighReg(), 0);
      } else if (shift_amount > 31) {
-        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
-        NewLIR2(kX86Shr32RI, rl_result.low_reg, shift_amount - 32);
-        LoadConstant(rl_result.high_reg, 0);
+        OpRegCopy(rl_result.reg.GetReg(),
rl_src.reg.GetHighReg()); + NewLIR2(kX86Shr32RI, rl_result.reg.GetReg(), shift_amount - 32); + LoadConstant(rl_result.reg.GetHighReg(), 0); } else { - OpRegCopy(rl_result.low_reg, rl_src.low_reg); - OpRegCopy(rl_result.high_reg, rl_src.high_reg); - NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount); - NewLIR2(kX86Shr32RI, rl_result.high_reg, shift_amount); + OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg()); + OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg()); + NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount); + NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount); } break; default: @@ -1567,7 +1569,7 @@ X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_h int32_t value) { bool in_mem = loc.location != kLocPhysReg; bool byte_imm = IS_SIMM8(value); - DCHECK(in_mem || !IsFpReg(loc.low_reg)); + DCHECK(in_mem || !IsFpReg(loc.reg.GetReg())); switch (op) { case Instruction::ADD_LONG: case Instruction::ADD_LONG_2ADDR: @@ -1647,15 +1649,15 @@ void X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true); DCHECK_EQ(rl_result.location, kLocPhysReg); - DCHECK(!IsFpReg(rl_result.low_reg)); + DCHECK(!IsFpReg(rl_result.reg.GetReg())); if (!IsNoOp(op, val_lo)) { X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo); - NewLIR2(x86op, rl_result.low_reg, val_lo); + NewLIR2(x86op, rl_result.reg.GetReg(), val_lo); } if (!IsNoOp(op, val_hi)) { X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi); - NewLIR2(x86op, rl_result.high_reg, val_hi); + NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi); } StoreValueWide(rl_dest, rl_result); } @@ -1671,15 +1673,15 @@ void X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, // Can we do this directly into the destination registers? 
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg && - rl_dest.low_reg == rl_src1.low_reg && rl_dest.high_reg == rl_src1.high_reg && - !IsFpReg(rl_dest.low_reg)) { + rl_dest.reg.GetReg() == rl_src1.reg.GetReg() && rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() && + !IsFpReg(rl_dest.reg.GetReg())) { if (!IsNoOp(op, val_lo)) { X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo); - NewLIR2(x86op, rl_dest.low_reg, val_lo); + NewLIR2(x86op, rl_dest.reg.GetReg(), val_lo); } if (!IsNoOp(op, val_hi)) { X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi); - NewLIR2(x86op, rl_dest.high_reg, val_hi); + NewLIR2(x86op, rl_dest.reg.GetHighReg(), val_hi); } StoreFinalValueWide(rl_dest, rl_dest); @@ -1693,11 +1695,11 @@ void X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_result = ForceTempWide(rl_src1); if (!IsNoOp(op, val_lo)) { X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo); - NewLIR2(x86op, rl_result.low_reg, val_lo); + NewLIR2(x86op, rl_result.reg.GetReg(), val_lo); } if (!IsNoOp(op, val_hi)) { X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi); - NewLIR2(x86op, rl_result.high_reg, val_hi); + NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi); } StoreFinalValueWide(rl_dest, rl_result); @@ -1709,17 +1711,17 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) { RegLocation object = LoadValue(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - int result_reg = rl_result.low_reg; + int result_reg = rl_result.reg.GetReg(); // SETcc only works with EAX..EDX. - if (result_reg == object.low_reg || result_reg >= 4) { + if (result_reg == object.reg.GetReg() || result_reg >= 4) { result_reg = AllocTypedTemp(false, kCoreReg); DCHECK_LT(result_reg, 4); } // Assume that there is no match. LoadConstant(result_reg, 0); - LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL); + LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL); int check_class = AllocTypedTemp(false, kCoreReg); @@ -1730,11 +1732,11 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, if (rl_method.location == kLocPhysReg) { if (use_declaring_class) { - LoadWordDisp(rl_method.low_reg, + LoadWordDisp(rl_method.reg.GetReg(), mirror::ArtMethod::DeclaringClassOffset().Int32Value(), check_class); } else { - LoadWordDisp(rl_method.low_reg, + LoadWordDisp(rl_method.reg.GetReg(), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), check_class); LoadWordDisp(check_class, offset_of_type, check_class); @@ -1755,7 +1757,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, // Compare the computed class to the class in the object. DCHECK_EQ(object.location, kLocPhysReg); - OpRegMem(kOpCmp, check_class, object.low_reg, + OpRegMem(kOpCmp, check_class, object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value()); // Set the low byte of the result to 0 or 1 from the compare condition code. 
@@ -1765,7 +1767,7 @@ void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, null_branchover->target = target; FreeTemp(check_class); if (IsTemp(result_reg)) { - OpRegCopy(rl_result.low_reg, result_reg); + OpRegCopy(rl_result.reg.GetReg(), result_reg); FreeTemp(result_reg); } StoreValue(rl_dest, rl_result); @@ -1818,7 +1820,7 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k RegLocation rl_result = GetReturn(false); // SETcc only works with EAX..EDX. - DCHECK_LT(rl_result.low_reg, 4); + DCHECK_LT(rl_result.reg.GetReg(), 4); // Is the class NULL? LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL); @@ -1830,13 +1832,13 @@ void X86Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_k LIR* branchover = nullptr; if (type_known_final) { // Ensure top 3 bytes of result are 0. - LoadConstant(rl_result.low_reg, 0); + LoadConstant(rl_result.reg.GetReg(), 0); OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Set the low byte of the result to 0 or 1 from the compare condition code. - NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondEq); + NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq); } else { if (!type_known_abstract) { - LoadConstant(rl_result.low_reg, 1); // Assume result succeeds. + LoadConstant(rl_result.reg.GetReg(), 1); // Assume result succeeds. branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); } OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); @@ -1964,7 +1966,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, rl_lhs = LoadValue(rl_lhs, kCoreReg); rl_result = UpdateLoc(rl_dest); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegReg(op, rl_result.low_reg, rl_lhs.low_reg); + OpRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg()); } else { if (shift_op) { // X86 doesn't require masking and must use ECX. @@ -1979,9 +1981,9 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, OpMemReg(op, rl_result, t_reg); FreeTemp(t_reg); return; - } else if (!IsFpReg(rl_result.low_reg)) { + } else if (!IsFpReg(rl_result.reg.GetReg())) { // Can do this directly into the result register - OpRegReg(op, rl_result.low_reg, t_reg); + OpRegReg(op, rl_result.reg.GetReg(), t_reg); FreeTemp(t_reg); StoreFinalValue(rl_dest, rl_result); return; @@ -1990,7 +1992,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, // Three address form, or we can't do directly. rl_lhs = LoadValue(rl_lhs, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, t_reg); + OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), t_reg); FreeTemp(t_reg); } else { // Multiply is 3 operand only (sort of). @@ -2001,11 +2003,11 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, // Can we do this from memory directly? 
rl_rhs = UpdateLoc(rl_rhs); if (rl_rhs.location != kLocPhysReg) { - OpRegMem(op, rl_result.low_reg, rl_rhs); + OpRegMem(op, rl_result.reg.GetReg(), rl_rhs); StoreFinalValue(rl_dest, rl_result); return; - } else if (!IsFpReg(rl_rhs.low_reg)) { - OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg); + } else if (!IsFpReg(rl_rhs.reg.GetReg())) { + OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg()); StoreFinalValue(rl_dest, rl_result); return; } @@ -2013,17 +2015,17 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, rl_rhs = LoadValue(rl_rhs, kCoreReg); if (rl_result.location != kLocPhysReg) { // Okay, we can do this into memory. - OpMemReg(op, rl_result, rl_rhs.low_reg); + OpMemReg(op, rl_result, rl_rhs.reg.GetReg()); return; - } else if (!IsFpReg(rl_result.low_reg)) { + } else if (!IsFpReg(rl_result.reg.GetReg())) { // Can do this directly into the result register. - OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg); + OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg()); StoreFinalValue(rl_dest, rl_result); return; } else { rl_lhs = LoadValue(rl_lhs, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg); + OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg()); } } else { // Try to use reg/memory instructions. @@ -2035,34 +2037,34 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, rl_lhs = LoadValue(rl_lhs, kCoreReg); rl_rhs = LoadValue(rl_rhs, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg); + OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg()); } else { // We can optimize by moving to result and using memory operands. if (rl_rhs.location != kLocPhysReg) { // Force LHS into result. rl_result = EvalLoc(rl_dest, kCoreReg, true); - LoadValueDirect(rl_lhs, rl_result.low_reg); - OpRegMem(op, rl_result.low_reg, rl_rhs); + LoadValueDirect(rl_lhs, rl_result.reg.GetReg()); + OpRegMem(op, rl_result.reg.GetReg(), rl_rhs); } else if (rl_lhs.location != kLocPhysReg) { // RHS is in a register; LHS is in memory. if (op != kOpSub) { // Force RHS into result and operate on memory. rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegCopy(rl_result.low_reg, rl_rhs.low_reg); - OpRegMem(op, rl_result.low_reg, rl_lhs); + OpRegCopy(rl_result.reg.GetReg(), rl_rhs.reg.GetReg()); + OpRegMem(op, rl_result.reg.GetReg(), rl_lhs); } else { // Subtraction isn't commutative. rl_lhs = LoadValue(rl_lhs, kCoreReg); rl_rhs = LoadValue(rl_rhs, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg); + OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg()); } } else { // Both are in registers. rl_lhs = LoadValue(rl_lhs, kCoreReg); rl_rhs = LoadValue(rl_rhs, kCoreReg); rl_result = EvalLoc(rl_dest, kCoreReg, true); - OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg); + OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg()); } } } @@ -2073,10 +2075,10 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) { // If we have non-core registers, then we can't do good things. 
- if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.low_reg)) { + if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.reg.GetReg())) { return false; } - if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.low_reg)) { + if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.reg.GetReg())) { return false; } diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index eea7191c3b..0b8e1ee799 100644 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -49,23 +49,19 @@ namespace art { }; RegLocation X86Mir2Lir::LocCReturn() { - RegLocation res = X86_LOC_C_RETURN; - return res; + return x86_loc_c_return; } RegLocation X86Mir2Lir::LocCReturnWide() { - RegLocation res = X86_LOC_C_RETURN_WIDE; - return res; + return x86_loc_c_return_wide; } RegLocation X86Mir2Lir::LocCReturnFloat() { - RegLocation res = X86_LOC_C_RETURN_FLOAT; - return res; + return x86_loc_c_return_float; } RegLocation X86Mir2Lir::LocCReturnDouble() { - RegLocation res = X86_LOC_C_RETURN_DOUBLE; - return res; + return x86_loc_c_return_double; } // Return a target-dependent special register. @@ -390,19 +386,19 @@ void X86Mir2Lir::ClobberCallerSave() { RegLocation X86Mir2Lir::GetReturnWideAlt() { RegLocation res = LocCReturnWide(); - CHECK(res.low_reg == rAX); - CHECK(res.high_reg == rDX); + CHECK(res.reg.GetReg() == rAX); + CHECK(res.reg.GetHighReg() == rDX); Clobber(rAX); Clobber(rDX); MarkInUse(rAX); MarkInUse(rDX); - MarkPair(res.low_reg, res.high_reg); + MarkPair(res.reg.GetReg(), res.reg.GetHighReg()); return res; } RegLocation X86Mir2Lir::GetReturnAlt() { RegLocation res = LocCReturn(); - res.low_reg = rDX; + res.reg.SetReg(rDX); Clobber(rDX); MarkInUse(rDX); return res; @@ -430,27 +426,21 @@ void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) { NewLIR0(kX86Mfence); #endif } -/* - * Alloc a pair of core registers, or a double. Low reg in low byte, - * high reg in next byte. - */ -int X86Mir2Lir::AllocTypedTempPair(bool fp_hint, - int reg_class) { + +// Alloc a pair of core registers, or a double. +RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) { int high_reg; int low_reg; - int res = 0; if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) { low_reg = AllocTempDouble(); high_reg = low_reg; // only one allocated! - res = (low_reg & 0xff) | ((high_reg & 0xff) << 8); - return res; + // TODO: take advantage of 64-bit notation. 
+ return RegStorage(RegStorage::k64BitPair, low_reg, high_reg); } - low_reg = AllocTemp(); high_reg = AllocTemp(); - res = (low_reg & 0xff) | ((high_reg & 0xff) << 8); - return res; + return RegStorage(RegStorage::k64BitPair, low_reg, high_reg); } int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) { @@ -493,11 +483,11 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() { void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) { - if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) && - (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) { + if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) && + (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) { // No overlap, free both - FreeTemp(rl_free.low_reg); - FreeTemp(rl_free.high_reg); + FreeTemp(rl_free.reg.GetReg()); + FreeTemp(rl_free.reg.GetHighReg()); } } @@ -601,11 +591,11 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) { if (match) { // We can reuse;update the register usage info. - loc.low_reg = info_lo->reg; - loc.high_reg = info_lo->reg; // Play nice with existing code. loc.location = kLocPhysReg; loc.vec_len = kVectorLength8; - DCHECK(IsFpReg(loc.low_reg)); + // TODO: use k64BitVector + loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg); + DCHECK(IsFpReg(loc.reg.GetReg())); return loc; } // We can't easily reuse; clobber and free any overlaps. @@ -635,11 +625,10 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) { } if (match) { // Can reuse - update the register usage info - loc.low_reg = info_lo->reg; - loc.high_reg = info_hi->reg; + loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg); loc.location = kLocPhysReg; - MarkPair(loc.low_reg, loc.high_reg); - DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0)); + MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg()); + DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0)); return loc; } // Can't easily reuse - clobber and free any overlaps @@ -663,7 +652,6 @@ RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) { // TODO: Reunify with common code after 'pair mess' has been fixed RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) { DCHECK(loc.wide); - int32_t new_regs; int32_t low_reg; int32_t high_reg; @@ -671,38 +659,37 @@ RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) /* If it is already in a register, we can assume proper form. Is it the right reg class? */ if (loc.location == kLocPhysReg) { - DCHECK_EQ(IsFpReg(loc.low_reg), loc.IsVectorScalar()); - if (!RegClassMatches(reg_class, loc.low_reg)) { + DCHECK_EQ(IsFpReg(loc.reg.GetReg()), loc.IsVectorScalar()); + if (!RegClassMatches(reg_class, loc.reg.GetReg())) { /* It is the wrong register class. Reallocate and copy. */ - if (!IsFpReg(loc.low_reg)) { + if (!IsFpReg(loc.reg.GetReg())) { // We want this in a FP reg, and it is in core registers. DCHECK(reg_class != kCoreReg); // Allocate this into any FP reg, and mark it with the right size. low_reg = AllocTypedTemp(true, reg_class); - OpVectorRegCopyWide(low_reg, loc.low_reg, loc.high_reg); - CopyRegInfo(low_reg, loc.low_reg); - Clobber(loc.low_reg); - Clobber(loc.high_reg); - loc.low_reg = low_reg; - loc.high_reg = low_reg; // Play nice with existing code. 
+ OpVectorRegCopyWide(low_reg, loc.reg.GetReg(), loc.reg.GetHighReg()); + CopyRegInfo(low_reg, loc.reg.GetReg()); + Clobber(loc.reg.GetReg()); + Clobber(loc.reg.GetHighReg()); + loc.reg.SetReg(low_reg); + loc.reg.SetHighReg(low_reg); // Play nice with existing code. loc.vec_len = kVectorLength8; } else { // The value is in a FP register, and we want it in a pair of core registers. DCHECK_EQ(reg_class, kCoreReg); - DCHECK_EQ(loc.low_reg, loc.high_reg); - new_regs = AllocTypedTempPair(false, kCoreReg); // Force to core registers. - low_reg = new_regs & 0xff; - high_reg = (new_regs >> 8) & 0xff; + DCHECK_EQ(loc.reg.GetReg(), loc.reg.GetHighReg()); + RegStorage new_regs = AllocTypedTempWide(false, kCoreReg); // Force to core registers. + low_reg = new_regs.GetReg(); + high_reg = new_regs.GetHighReg(); DCHECK_NE(low_reg, high_reg); - OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg); - CopyRegInfo(low_reg, loc.low_reg); - CopyRegInfo(high_reg, loc.high_reg); - Clobber(loc.low_reg); - Clobber(loc.high_reg); - loc.low_reg = low_reg; - loc.high_reg = high_reg; - MarkPair(loc.low_reg, loc.high_reg); - DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0)); + OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg()); + CopyRegInfo(low_reg, loc.reg.GetReg()); + CopyRegInfo(high_reg, loc.reg.GetHighReg()); + Clobber(loc.reg.GetReg()); + Clobber(loc.reg.GetHighReg()); + loc.reg = new_regs; + MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg()); + DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0)); } } return loc; @@ -711,21 +698,20 @@ RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) DCHECK_NE(loc.s_reg_low, INVALID_SREG); DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG); - new_regs = AllocTypedTempPair(loc.fp, reg_class); - loc.low_reg = new_regs & 0xff; - loc.high_reg = (new_regs >> 8) & 0xff; + loc.reg = AllocTypedTempWide(loc.fp, reg_class); - if (loc.low_reg == loc.high_reg) { - DCHECK(IsFpReg(loc.low_reg)); + // FIXME: take advantage of RegStorage notation. + if (loc.reg.GetReg() == loc.reg.GetHighReg()) { + DCHECK(IsFpReg(loc.reg.GetReg())); loc.vec_len = kVectorLength8; } else { - MarkPair(loc.low_reg, loc.high_reg); + MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg()); } if (update) { loc.location = kLocPhysReg; - MarkLive(loc.low_reg, loc.s_reg_low); - if (loc.low_reg != loc.high_reg) { - MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low)); + MarkLive(loc.reg.GetReg(), loc.s_reg_low); + if (loc.reg.GetReg() != loc.reg.GetHighReg()) { + MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low)); } } return loc; @@ -741,14 +727,14 @@ RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) { loc = UpdateLoc(loc); if (loc.location == kLocPhysReg) { - if (!RegClassMatches(reg_class, loc.low_reg)) { + if (!RegClassMatches(reg_class, loc.reg.GetReg())) { /* Wrong register class. Realloc, copy and transfer ownership. 
*/ new_reg = AllocTypedTemp(loc.fp, reg_class); - OpRegCopy(new_reg, loc.low_reg); - CopyRegInfo(new_reg, loc.low_reg); - Clobber(loc.low_reg); - loc.low_reg = new_reg; - if (IsFpReg(loc.low_reg) && reg_class != kCoreReg) + OpRegCopy(new_reg, loc.reg.GetReg()); + CopyRegInfo(new_reg, loc.reg.GetReg()); + Clobber(loc.reg.GetReg()); + loc.reg.SetReg(new_reg); + if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg) loc.vec_len = kVectorLength4; } return loc; @@ -756,14 +742,13 @@ RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) { DCHECK_NE(loc.s_reg_low, INVALID_SREG); - new_reg = AllocTypedTemp(loc.fp, reg_class); - loc.low_reg = new_reg; - if (IsFpReg(loc.low_reg) && reg_class != kCoreReg) + loc.reg = RegStorage(RegStorage::k32BitSolo, AllocTypedTemp(loc.fp, reg_class)); + if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg) loc.vec_len = kVectorLength4; if (update) { loc.location = kLocPhysReg; - MarkLive(loc.low_reg, loc.s_reg_low); + MarkLive(loc.reg.GetReg(), loc.s_reg_low); } return loc; } @@ -776,15 +761,15 @@ int X86Mir2Lir::AllocTempDouble() { // TODO: Reunify with common code after 'pair mess' has been fixed void X86Mir2Lir::ResetDefLocWide(RegLocation rl) { DCHECK(rl.wide); - RegisterInfo* p_low = IsTemp(rl.low_reg); - if (IsFpReg(rl.low_reg)) { + RegisterInfo* p_low = IsTemp(rl.reg.GetReg()); + if (IsFpReg(rl.reg.GetReg())) { // We are using only the low register. if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) { NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low); } - ResetDef(rl.low_reg); + ResetDef(rl.reg.GetReg()); } else { - RegisterInfo* p_high = IsTemp(rl.high_reg); + RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg()); if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) { DCHECK(p_low->pair); NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low); @@ -792,8 +777,8 @@ void X86Mir2Lir::ResetDefLocWide(RegLocation rl) { if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) { DCHECK(p_high->pair); } - ResetDef(rl.low_reg); - ResetDef(rl.high_reg); + ResetDef(rl.reg.GetReg()); + ResetDef(rl.reg.GetHighReg()); } } @@ -832,8 +817,8 @@ void X86Mir2Lir::DumpRegLocation(RegLocation loc) { << (loc.high_word ? " h" : " ") << (loc.home ? " H" : " ") << " vec_len: " << loc.vec_len - << ", low: " << static_cast<int>(loc.low_reg) - << ", high: " << static_cast<int>(loc.high_reg) + << ", low: " << static_cast<int>(loc.reg.GetReg()) + << ", high: " << static_cast<int>(loc.reg.GetHighReg()) << ", s_reg: " << loc.s_reg_low << ", orig: " << loc.orig_sreg; } @@ -1036,8 +1021,8 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { // Runtime start index. rl_start = UpdateLoc(rl_start); if (rl_start.location == kLocPhysReg) { - length_compare = OpCmpBranch(kCondLe, rCX, rl_start.low_reg, nullptr); - OpRegReg(kOpSub, rCX, rl_start.low_reg); + length_compare = OpCmpBranch(kCondLe, rCX, rl_start.reg.GetReg(), nullptr); + OpRegReg(kOpSub, rCX, rl_start.reg.GetReg()); } else { // Compare to memory to avoid a register load. Handle pushed EDI. int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t); @@ -1066,13 +1051,13 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { } } else { if (rl_start.location == kLocPhysReg) { - if (rl_start.low_reg == rDI) { + if (rl_start.reg.GetReg() == rDI) { // We have a slight problem here. We are already using RDI! // Grab the value from the stack. 
LoadWordDisp(rX86_SP, 0, rDX); OpLea(rDI, rBX, rDX, 1, 0); } else { - OpLea(rDI, rBX, rl_start.low_reg, 1, 0); + OpLea(rDI, rBX, rl_start.reg.GetReg(), 1, 0); } } else { OpRegCopy(rDI, rBX); @@ -1094,14 +1079,14 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { // index = ((curr_ptr - orig_ptr) / 2) - 1. OpRegReg(kOpSub, rDI, rBX); OpRegImm(kOpAsr, rDI, 1); - NewLIR3(kX86Lea32RM, rl_return.low_reg, rDI, -1); + NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1); LIR *all_done = NewLIR1(kX86Jmp8, 0); // Failed to match; return -1. LIR *not_found = NewLIR0(kPseudoTargetLabel); length_compare->target = not_found; failed_branch->target = not_found; - LoadConstantNoClobber(rl_return.low_reg, -1); + LoadConstantNoClobber(rl_return.reg.GetReg(), -1); // And join up at the end. all_done->target = NewLIR0(kPseudoTargetLabel); diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index 48a39bb5b4..d5d6b0e348 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -514,7 +514,7 @@ LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) { // We don't know the proper offset for the value, so pick one that will force // 4 byte offset. We will fix this up in the assembler later to have the right // value. - res = LoadBaseDisp(rl_method.low_reg, 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG); + res = LoadBaseDisp(rl_method.reg.GetReg(), 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG); res->target = data_target; res->flags.fixup = kFixupLoad; SetMemRefType(res, true, kLiteral); @@ -714,7 +714,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale, opcode = is_array ? kX86Mov8AR : kX86Mov8MR; break; default: - LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody"; + LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody"; } if (!is_array) { diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h index 4064bd6550..1df9ab1ec3 100644 --- a/compiler/dex/quick/x86/x86_lir.h +++ b/compiler/dex/quick/x86/x86_lir.h @@ -126,13 +126,6 @@ namespace art { /* Mask to strip off fp flags */ #define X86_FP_REG_MASK 0xF -// RegisterLocation templates return values (rAX, rAX/rDX or XMM0). -// location, wide, defined, const, fp, core, ref, high_word, home, vec_len, low_reg, high_reg, s_reg_low -#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG} -#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, rDX, INVALID_SREG, INVALID_SREG} -#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG} -#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8, fr0, fr0, INVALID_SREG, INVALID_SREG} - enum X86ResourceEncodingPos { kX86GPReg0 = 0, kX86RegSP = 4, @@ -211,6 +204,22 @@ enum X86NativeRegisterPool { #define rX86_COUNT rCX #define rX86_PC INVALID_REG +// RegisterLocation templates return values (r_V0, or r_V0/r_V1). +const RegLocation x86_loc_c_return + {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, + RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG}; +const RegLocation x86_loc_c_return_wide + {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, + RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG}; +// TODO: update to use k32BitVector (must encode in 7 bits, including fp flag). 
+const RegLocation x86_loc_c_return_float
+    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4,
+     RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
+// TODO: update to use k64BitVector (must encode in 7 bits, including fp flag).
+const RegLocation x86_loc_c_return_double
+    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8,
+     RegStorage(RegStorage::k64BitPair, fr0, fr1), INVALID_SREG, INVALID_SREG};
+
 /*
  * The following enum defines the list of supported X86 instructions by the
  * assembler. Their corresponding EncodingMap positions will be defined in
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
new file mode 100644
index 0000000000..c59617edea
--- /dev/null
+++ b/compiler/dex/reg_storage.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_REG_STORAGE_H_
+#define ART_COMPILER_DEX_REG_STORAGE_H_
+
+
+namespace art {
+
+/*
+ * Representation of the physical register, register pair or vector holding a Dalvik value.
+ * The basic configuration of the storage (i.e. solo reg, pair, vector) is common across all
+ * targets, but the encoding of the actual storage element is target independent.
+ *
+ * The two most-significant bits describe the basic shape of the storage, while meaning of the
+ * lower 14 bits depends on the shape:
+ *
+ *  [PW]
+ *       P: 0 -> pair, 1 -> solo (or vector)
+ *       W: 1 -> 64 bits, 0 -> 32 bits
+ *
+ *  [00] [xxxxxxxxxxxxxx]         Invalid (typically all zeros)
+ *  [01] [HHHHHHH] [LLLLLLL]      64-bit storage, composed of 2 32-bit registers
+ *  [10] [0] [xxxxxx] [RRRRRRR]   32-bit solo register
+ *  [11] [0] [xxxxxx] [RRRRRRR]   64-bit solo register
+ *  [10] [1] [xxxxxx] [VVVVVVV]   32-bit vector storage
+ *  [11] [1] [xxxxxx] [VVVVVVV]   64-bit vector storage
+ *
+ * x - don't care
+ * L - low register number of a pair
+ * H - high register number of a pair
+ * R - register number of a solo reg
+ * V - vector description
+ *
+ * Note that in all non-invalid cases, the low 7 bits must be sufficient to describe
+ * whether the storage element is floating point (see IsFloatReg()).
+ *
+ */
+
+class RegStorage {
+ public:
+  enum RegStorageKind {
+    kInvalid     = 0x0000,
+    k64BitPair   = 0x4000,
+    k32BitSolo   = 0x8000,
+    k64BitSolo   = 0xc000,
+    k32BitVector = 0xa000,
+    k64BitVector = 0xe000,
+    kPairMask    = 0x8000,
+    kPair        = 0x0000,
+    kSizeMask    = 0x4000,
+    k64Bit       = 0x4000,
+    k32Bit       = 0x0000,
+    kVectorMask  = 0xa000,
+    kVector      = 0xa000,
+    kSolo        = 0x8000,
+    kShapeMask   = 0xc000,
+    kKindMask    = 0xe000
+  };
+
+  static const uint16_t kRegValMask = 0x007f;
+  static const uint16_t kHighRegShift = 7;
+  static const uint16_t kHighRegMask = kRegValMask << kHighRegShift;
+
+  RegStorage(RegStorageKind rs_kind, int reg) {
+    DCHECK_NE(rs_kind & kShapeMask, kInvalid);
+    DCHECK_NE(rs_kind & kShapeMask, k64BitPair);
+    DCHECK_EQ(rs_kind & ~kKindMask, 0);
+    DCHECK_EQ(reg & ~kRegValMask, 0);
+    reg_ = rs_kind | reg;
+  }
+  RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg) {
+    DCHECK_EQ(rs_kind, k64BitPair);
+    DCHECK_EQ(low_reg & ~kRegValMask, 0);
+    DCHECK_EQ(high_reg & ~kRegValMask, 0);
+    reg_ = rs_kind | (high_reg << kHighRegShift) | low_reg;
+  }
+  explicit RegStorage(uint16_t val) : reg_(val) {}
+  RegStorage() : reg_(kInvalid) {}
+  ~RegStorage() {}
+
+  bool IsInvalid() const {
+    return ((reg_ & kShapeMask) == kInvalid);
+  }
+
+  bool Is32Bit() const {
+    DCHECK(!IsInvalid());
+    return ((reg_ & kSizeMask) == k32Bit);
+  }
+
+  bool Is64Bit() const {
+    DCHECK(!IsInvalid());
+    return ((reg_ & kSizeMask) == k64Bit);
+  }
+
+  bool IsPair() const {
+    DCHECK(!IsInvalid());
+    return ((reg_ & kPairMask) == kPair);
+  }
+
+  bool IsSolo() const {
+    DCHECK(!IsInvalid());
+    return ((reg_ & kVectorMask) == kSolo);
+  }
+
+  bool IsVector() const {
+    DCHECK(!IsInvalid());
+    return ((reg_ & kVectorMask) == kVector);
+  }
+
+  // Used to retrieve either the low register of a pair, or the only register.
+  int GetReg() const {
+    DCHECK(!IsInvalid());
+    return (reg_ & kRegValMask);
+  }
+
+  void SetReg(int reg) {
+    DCHECK(!IsInvalid());
+    reg_ = (reg_ & ~kRegValMask) | reg;
+    DCHECK_EQ(GetReg(), reg);
+  }
+
+  // Retrieve the most significant register of a pair.
+  int GetHighReg() const {
+    DCHECK(IsPair());
+    return (reg_ & kHighRegMask) >> kHighRegShift;
+  }
+
+  void SetHighReg(int reg) {
+    DCHECK(IsPair());
+    reg_ = (reg_ & ~kHighRegMask) | (reg << kHighRegShift);
+    DCHECK_EQ(GetHighReg(), reg);
+  }
+
+  int GetRawBits() const {
+    return reg_;
+  }
+
+ private:
+  uint16_t reg_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_REG_STORAGE_H_
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index f8dc223af7..4d2c05166b 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -379,16 +379,14 @@ void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu_->cg.get());
   if (cg != NULL) {
     for (int i = 0; i < count; i++) {
-      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c %c%d %c%d S%d",
+      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c 0x%04x S%d",
           table[i].orig_sreg, storage_name[table[i].location],
           table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
           table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
           table[i].is_const ? 'c' : 'n',
           table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
-          cg->IsFpReg(table[i].low_reg) ? 's' : 'r',
-          table[i].low_reg & cg->FpRegMask(),
-          cg->IsFpReg(table[i].high_reg) ? 's' : 'r',
-          table[i].high_reg & cg->FpRegMask(), table[i].s_reg_low);
+          table[i].reg.GetRawBits(),
+          table[i].s_reg_low);
     }
   } else {
     // Either pre-regalloc or Portable.
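The encoding documented in reg_storage.h above can be sanity-checked with a few lines of standalone C++. This is an illustrative sketch only, not part of the CL: the constants mirror k64BitPair, k32BitSolo, kRegValMask and kHighRegShift from the new header, and the register numbers 0 and 2 are arbitrary stand-ins for target-specific encodings (e.g. x86 rAX/rDX).

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    // Shape bits from reg_storage.h: [01] = 64-bit pair, [10] = 32-bit solo.
    const uint16_t k64BitPair = 0x4000;
    const uint16_t k32BitSolo = 0x8000;
    const uint16_t kRegValMask = 0x007f;   // low 7 bits: solo reg, or low reg of a pair
    const uint16_t kHighRegShift = 7;      // high reg of a pair lives in the next 7 bits

    int low = 0, high = 2;                 // arbitrary register numbers for illustration
    uint16_t pair = k64BitPair | (high << kHighRegShift) | low;  // what RegStorage(k64BitPair, low, high) stores
    uint16_t solo = k32BitSolo | low;                            // what RegStorage(k32BitSolo, low) stores

    assert((pair & kRegValMask) == low);                         // corresponds to GetReg()
    assert(((pair >> kHighRegShift) & kRegValMask) == high);     // corresponds to GetHighReg()
    assert((solo & 0xc000) == k32BitSolo);                       // shape test, cf. kShapeMask
    std::printf("pair=0x%04x solo=0x%04x\n", pair, solo);
    return 0;
  }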
@@ -404,9 +402,9 @@ void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
   }
 }

-static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
-                                      kVectorNotUsed, INVALID_REG, INVALID_REG, INVALID_SREG,
-                                      INVALID_SREG};
+// FIXME - will likely need to revisit all uses of this.
+static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0, kVectorNotUsed,
+                                      RegStorage(), INVALID_SREG, INVALID_SREG};

 void MIRGraph::InitRegLocations() {
   /* Allocate the location map */
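For readers following the mechanical low_reg/high_reg to reg.GetReg()/reg.GetHighReg() substitution in the x86 hunks above, here is a hedged, self-contained sketch of the pattern. PairStorage and MiniRegLocation are invented, pared-down stand-ins (pair shape only) rather than ART types; the real behavior is defined by RegStorage and RegLocation in the tree.

  #include <cstdint>
  #include <cstdio>

  // Pared-down, pair-only imitation of art::RegStorage, kept just large enough
  // to show the accessor pattern. Not the real class.
  class PairStorage {
   public:
    PairStorage(int low_reg, int high_reg)
        : reg_(static_cast<uint16_t>(0x4000 | (high_reg << 7) | low_reg)) {}
    int GetReg() const { return reg_ & 0x7f; }              // low register of the pair
    int GetHighReg() const { return (reg_ >> 7) & 0x7f; }   // high register of the pair
    void SetHighReg(int reg) { reg_ = (reg_ & ~(0x7f << 7)) | (reg << 7); }
   private:
    uint16_t reg_;
  };

  // Stand-in for the reworked RegLocation: one storage member replaces the old
  // low_reg/high_reg byte fields.
  struct MiniRegLocation {
    PairStorage reg;
  };

  int main() {
    MiniRegLocation rl_result{PairStorage(0, 2)};  // was: low_reg = r0, high_reg = r2
    // Old style:                     New style (as in the hunks above):
    //   use(rl_result.low_reg);  ->    use(rl_result.reg.GetReg());
    //   use(rl_result.high_reg); ->    use(rl_result.reg.GetHighReg());
    std::printf("low=%d high=%d\n", rl_result.reg.GetReg(), rl_result.reg.GetHighReg());
    rl_result.reg.SetHighReg(3);                   // was: rl_result.high_reg = r3
    std::printf("high=%d\n", rl_result.reg.GetHighReg());
    return 0;
  }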