commit    475871a144eb604ddaf37503397ba0941442e5fb (patch)
tree      adeddbc1f7871c2215b6ca4d9d914eee53a33961 /lib
parent    8968450305c28444edc3c272d8752a8db0c2f34a (diff)
author    Dan Gohman <gohman@apple.com>    2008-07-27 21:46:04 +0000
committer Dan Gohman <gohman@apple.com>    2008-07-27 21:46:04 +0000
Rename SDOperand to SDValue.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@54128 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
49 files changed, 5147 insertions, 5153 deletions
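
The patch below is a mechanical rename: the SelectionDAG value-handle class SDOperand becomes SDValue, while its interface (getOperand/getValueType, the public Val pointer, and the default-constructed "null handle" convention) stays the same, so call sites only swap the type name. As a rough illustration of the before/after shape (a minimal sketch only; the function body here is illustrative and not taken from this commit), a DAG-combiner visit routine changes like this:

    // Before r54128: the handle type was spelled SDOperand.
    SDOperand DAGCombiner::visitCTPOP(SDNode *N) {
      SDOperand N0 = N->getOperand(0);     // operand 0 of the CTPOP node
      // ... folds would go here; returning a default-constructed handle
      // means "no change was made".
      return SDOperand();
    }

    // After r54128: identical logic, only the type name changes.
    SDValue DAGCombiner::visitCTPOP(SDNode *N) {
      SDValue N0 = N->getOperand(0);
      return SDValue();
    }
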
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index e6c3314b24..747a0b1e54 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -70,7 +70,7 @@ namespace { /// visit - call the node-specific routine that knows how to fold each /// particular type of node. - SDOperand visit(SDNode *N); + SDValue visit(SDNode *N); public: /// AddToWorkList - Add to the work list making sure it's instance is at the @@ -87,16 +87,16 @@ namespace { WorkList.end()); } - SDOperand CombineTo(SDNode *N, const SDOperand *To, unsigned NumTo, + SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo, bool AddTo = true); - SDOperand CombineTo(SDNode *N, SDOperand Res, bool AddTo = true) { + SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) { return CombineTo(N, &Res, 1, AddTo); } - SDOperand CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1, + SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true) { - SDOperand To[] = { Res0, Res1 }; + SDValue To[] = { Res0, Res1 }; return CombineTo(N, To, 2, AddTo); } @@ -105,12 +105,12 @@ namespace { /// SimplifyDemandedBits - Check the specified integer node value to see if /// it can be simplified or if things it uses can be simplified by bit /// propagation. If so, return true. - bool SimplifyDemandedBits(SDOperand Op) { + bool SimplifyDemandedBits(SDValue Op) { APInt Demanded = APInt::getAllOnesValue(Op.getValueSizeInBits()); return SimplifyDemandedBits(Op, Demanded); } - bool SimplifyDemandedBits(SDOperand Op, const APInt &Demanded); + bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded); bool CombineToPreIndexedLoadStore(SDNode *N); bool CombineToPostIndexedLoadStore(SDNode *N); @@ -119,122 +119,122 @@ namespace { /// combine - call the node-specific routine that knows how to fold each /// particular type of node. If that doesn't do anything, try the /// target-specific DAG combines. - SDOperand combine(SDNode *N); + SDValue combine(SDNode *N); // Visitation implementation - Implement dag node combining for different // node types. The semantics are as follows: // Return Value: - // SDOperand.Val == 0 - No change was made - // SDOperand.Val == N - N was replaced, is dead, and is already handled. + // SDValue.Val == 0 - No change was made + // SDValue.Val == N - N was replaced, is dead, and is already handled. // otherwise - N should be replaced by the returned Operand. 
// - SDOperand visitTokenFactor(SDNode *N); - SDOperand visitMERGE_VALUES(SDNode *N); - SDOperand visitADD(SDNode *N); - SDOperand visitSUB(SDNode *N); - SDOperand visitADDC(SDNode *N); - SDOperand visitADDE(SDNode *N); - SDOperand visitMUL(SDNode *N); - SDOperand visitSDIV(SDNode *N); - SDOperand visitUDIV(SDNode *N); - SDOperand visitSREM(SDNode *N); - SDOperand visitUREM(SDNode *N); - SDOperand visitMULHU(SDNode *N); - SDOperand visitMULHS(SDNode *N); - SDOperand visitSMUL_LOHI(SDNode *N); - SDOperand visitUMUL_LOHI(SDNode *N); - SDOperand visitSDIVREM(SDNode *N); - SDOperand visitUDIVREM(SDNode *N); - SDOperand visitAND(SDNode *N); - SDOperand visitOR(SDNode *N); - SDOperand visitXOR(SDNode *N); - SDOperand SimplifyVBinOp(SDNode *N); - SDOperand visitSHL(SDNode *N); - SDOperand visitSRA(SDNode *N); - SDOperand visitSRL(SDNode *N); - SDOperand visitCTLZ(SDNode *N); - SDOperand visitCTTZ(SDNode *N); - SDOperand visitCTPOP(SDNode *N); - SDOperand visitSELECT(SDNode *N); - SDOperand visitSELECT_CC(SDNode *N); - SDOperand visitSETCC(SDNode *N); - SDOperand visitSIGN_EXTEND(SDNode *N); - SDOperand visitZERO_EXTEND(SDNode *N); - SDOperand visitANY_EXTEND(SDNode *N); - SDOperand visitSIGN_EXTEND_INREG(SDNode *N); - SDOperand visitTRUNCATE(SDNode *N); - SDOperand visitBIT_CONVERT(SDNode *N); - SDOperand visitBUILD_PAIR(SDNode *N); - SDOperand visitFADD(SDNode *N); - SDOperand visitFSUB(SDNode *N); - SDOperand visitFMUL(SDNode *N); - SDOperand visitFDIV(SDNode *N); - SDOperand visitFREM(SDNode *N); - SDOperand visitFCOPYSIGN(SDNode *N); - SDOperand visitSINT_TO_FP(SDNode *N); - SDOperand visitUINT_TO_FP(SDNode *N); - SDOperand visitFP_TO_SINT(SDNode *N); - SDOperand visitFP_TO_UINT(SDNode *N); - SDOperand visitFP_ROUND(SDNode *N); - SDOperand visitFP_ROUND_INREG(SDNode *N); - SDOperand visitFP_EXTEND(SDNode *N); - SDOperand visitFNEG(SDNode *N); - SDOperand visitFABS(SDNode *N); - SDOperand visitBRCOND(SDNode *N); - SDOperand visitBR_CC(SDNode *N); - SDOperand visitLOAD(SDNode *N); - SDOperand visitSTORE(SDNode *N); - SDOperand visitINSERT_VECTOR_ELT(SDNode *N); - SDOperand visitEXTRACT_VECTOR_ELT(SDNode *N); - SDOperand visitBUILD_VECTOR(SDNode *N); - SDOperand visitCONCAT_VECTORS(SDNode *N); - SDOperand visitVECTOR_SHUFFLE(SDNode *N); - - SDOperand XformToShuffleWithZero(SDNode *N); - SDOperand ReassociateOps(unsigned Opc, SDOperand LHS, SDOperand RHS); + SDValue visitTokenFactor(SDNode *N); + SDValue visitMERGE_VALUES(SDNode *N); + SDValue visitADD(SDNode *N); + SDValue visitSUB(SDNode *N); + SDValue visitADDC(SDNode *N); + SDValue visitADDE(SDNode *N); + SDValue visitMUL(SDNode *N); + SDValue visitSDIV(SDNode *N); + SDValue visitUDIV(SDNode *N); + SDValue visitSREM(SDNode *N); + SDValue visitUREM(SDNode *N); + SDValue visitMULHU(SDNode *N); + SDValue visitMULHS(SDNode *N); + SDValue visitSMUL_LOHI(SDNode *N); + SDValue visitUMUL_LOHI(SDNode *N); + SDValue visitSDIVREM(SDNode *N); + SDValue visitUDIVREM(SDNode *N); + SDValue visitAND(SDNode *N); + SDValue visitOR(SDNode *N); + SDValue visitXOR(SDNode *N); + SDValue SimplifyVBinOp(SDNode *N); + SDValue visitSHL(SDNode *N); + SDValue visitSRA(SDNode *N); + SDValue visitSRL(SDNode *N); + SDValue visitCTLZ(SDNode *N); + SDValue visitCTTZ(SDNode *N); + SDValue visitCTPOP(SDNode *N); + SDValue visitSELECT(SDNode *N); + SDValue visitSELECT_CC(SDNode *N); + SDValue visitSETCC(SDNode *N); + SDValue visitSIGN_EXTEND(SDNode *N); + SDValue visitZERO_EXTEND(SDNode *N); + SDValue visitANY_EXTEND(SDNode *N); + SDValue visitSIGN_EXTEND_INREG(SDNode *N); + 
SDValue visitTRUNCATE(SDNode *N); + SDValue visitBIT_CONVERT(SDNode *N); + SDValue visitBUILD_PAIR(SDNode *N); + SDValue visitFADD(SDNode *N); + SDValue visitFSUB(SDNode *N); + SDValue visitFMUL(SDNode *N); + SDValue visitFDIV(SDNode *N); + SDValue visitFREM(SDNode *N); + SDValue visitFCOPYSIGN(SDNode *N); + SDValue visitSINT_TO_FP(SDNode *N); + SDValue visitUINT_TO_FP(SDNode *N); + SDValue visitFP_TO_SINT(SDNode *N); + SDValue visitFP_TO_UINT(SDNode *N); + SDValue visitFP_ROUND(SDNode *N); + SDValue visitFP_ROUND_INREG(SDNode *N); + SDValue visitFP_EXTEND(SDNode *N); + SDValue visitFNEG(SDNode *N); + SDValue visitFABS(SDNode *N); + SDValue visitBRCOND(SDNode *N); + SDValue visitBR_CC(SDNode *N); + SDValue visitLOAD(SDNode *N); + SDValue visitSTORE(SDNode *N); + SDValue visitINSERT_VECTOR_ELT(SDNode *N); + SDValue visitEXTRACT_VECTOR_ELT(SDNode *N); + SDValue visitBUILD_VECTOR(SDNode *N); + SDValue visitCONCAT_VECTORS(SDNode *N); + SDValue visitVECTOR_SHUFFLE(SDNode *N); + + SDValue XformToShuffleWithZero(SDNode *N); + SDValue ReassociateOps(unsigned Opc, SDValue LHS, SDValue RHS); - SDOperand visitShiftByConstant(SDNode *N, unsigned Amt); + SDValue visitShiftByConstant(SDNode *N, unsigned Amt); - bool SimplifySelectOps(SDNode *SELECT, SDOperand LHS, SDOperand RHS); - SDOperand SimplifyBinOpWithSameOpcodeHands(SDNode *N); - SDOperand SimplifySelect(SDOperand N0, SDOperand N1, SDOperand N2); - SDOperand SimplifySelectCC(SDOperand N0, SDOperand N1, SDOperand N2, - SDOperand N3, ISD::CondCode CC, + bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS); + SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N); + SDValue SimplifySelect(SDValue N0, SDValue N1, SDValue N2); + SDValue SimplifySelectCC(SDValue N0, SDValue N1, SDValue N2, + SDValue N3, ISD::CondCode CC, bool NotExtCompare = false); - SDOperand SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, + SDValue SimplifySetCC(MVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans = true); - SDOperand SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, + SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, unsigned HiOp); - SDOperand CombineConsecutiveLoads(SDNode *N, MVT VT); - SDOperand ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT); - SDOperand BuildSDIV(SDNode *N); - SDOperand BuildUDIV(SDNode *N); - SDNode *MatchRotate(SDOperand LHS, SDOperand RHS); - SDOperand ReduceLoadWidth(SDNode *N); + SDValue CombineConsecutiveLoads(SDNode *N, MVT VT); + SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, MVT); + SDValue BuildSDIV(SDNode *N); + SDValue BuildUDIV(SDNode *N); + SDNode *MatchRotate(SDValue LHS, SDValue RHS); + SDValue ReduceLoadWidth(SDNode *N); - SDOperand GetDemandedBits(SDOperand V, const APInt &Mask); + SDValue GetDemandedBits(SDValue V, const APInt &Mask); /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes, /// looking for aliasing nodes and adding them to the Aliases vector. - void GatherAllAliases(SDNode *N, SDOperand OriginalChain, - SmallVector<SDOperand, 8> &Aliases); + void GatherAllAliases(SDNode *N, SDValue OriginalChain, + SmallVector<SDValue, 8> &Aliases); /// isAlias - Return true if there is any possibility that the two addresses /// overlap. 
- bool isAlias(SDOperand Ptr1, int64_t Size1, + bool isAlias(SDValue Ptr1, int64_t Size1, const Value *SrcValue1, int SrcValueOffset1, - SDOperand Ptr2, int64_t Size2, + SDValue Ptr2, int64_t Size2, const Value *SrcValue2, int SrcValueOffset2); /// FindAliasInfo - Extracts the relevant alias information from the memory /// node. Returns true if the operand was a load. bool FindAliasInfo(SDNode *N, - SDOperand &Ptr, int64_t &Size, + SDValue &Ptr, int64_t &Size, const Value *&SrcValue, int &SrcValueOffset); /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, /// looking for a better chain (aliasing node.) - SDOperand FindBetterChain(SDNode *N, SDOperand Chain); + SDValue FindBetterChain(SDNode *N, SDValue Chain); public: DAGCombiner(SelectionDAG &D, AliasAnalysis &A) @@ -276,19 +276,19 @@ void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) { ((DAGCombiner*)DC)->AddToWorkList(N); } -SDOperand TargetLowering::DAGCombinerInfo:: -CombineTo(SDNode *N, const std::vector<SDOperand> &To) { +SDValue TargetLowering::DAGCombinerInfo:: +CombineTo(SDNode *N, const std::vector<SDValue> &To) { return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size()); } -SDOperand TargetLowering::DAGCombinerInfo:: -CombineTo(SDNode *N, SDOperand Res) { +SDValue TargetLowering::DAGCombinerInfo:: +CombineTo(SDNode *N, SDValue Res) { return ((DAGCombiner*)DC)->CombineTo(N, Res); } -SDOperand TargetLowering::DAGCombinerInfo:: -CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1) { +SDValue TargetLowering::DAGCombinerInfo:: +CombineTo(SDNode *N, SDValue Res0, SDValue Res1) { return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1); } @@ -300,7 +300,7 @@ CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1) { /// isNegatibleForFree - Return 1 if we can compute the negated form of the /// specified expression for the same cost as the expression itself, or 2 if we /// can compute the negated form more cheaply than the expression itself. -static char isNegatibleForFree(SDOperand Op, bool AfterLegalize, +static char isNegatibleForFree(SDValue Op, bool AfterLegalize, unsigned Depth = 0) { // No compile time optimizations on this type. if (Op.getValueType() == MVT::ppcf128) @@ -356,7 +356,7 @@ static char isNegatibleForFree(SDOperand Op, bool AfterLegalize, /// GetNegatedExpression - If isNegatibleForFree returns true, this function /// returns the newly negated expression. -static SDOperand GetNegatedExpression(SDOperand Op, SelectionDAG &DAG, +static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG, bool AfterLegalize, unsigned Depth = 0) { // fneg is removable even if it has multiple uses. if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0); @@ -436,8 +436,8 @@ static SDOperand GetNegatedExpression(SDOperand Op, SelectionDAG &DAG, // Also, set the incoming LHS, RHS, and CC references to the appropriate // nodes based on the type of node we are checking. This simplifies life a // bit for the callers. -static bool isSetCCEquivalent(SDOperand N, SDOperand &LHS, SDOperand &RHS, - SDOperand &CC) { +static bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS, + SDValue &CC) { if (N.getOpcode() == ISD::SETCC) { LHS = N.getOperand(0); RHS = N.getOperand(1); @@ -460,24 +460,24 @@ static bool isSetCCEquivalent(SDOperand N, SDOperand &LHS, SDOperand &RHS, // isOneUseSetCC - Return true if this is a SetCC-equivalent operation with only // one use. If this is true, it allows the users to invert the operation for // free when it is profitable to do so. 
-static bool isOneUseSetCC(SDOperand N) { - SDOperand N0, N1, N2; +static bool isOneUseSetCC(SDValue N) { + SDValue N0, N1, N2; if (isSetCCEquivalent(N, N0, N1, N2) && N.Val->hasOneUse()) return true; return false; } -SDOperand DAGCombiner::ReassociateOps(unsigned Opc, SDOperand N0, SDOperand N1){ +SDValue DAGCombiner::ReassociateOps(unsigned Opc, SDValue N0, SDValue N1){ MVT VT = N0.getValueType(); // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one use // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2)) if (N0.getOpcode() == Opc && isa<ConstantSDNode>(N0.getOperand(1))) { if (isa<ConstantSDNode>(N1)) { - SDOperand OpNode = DAG.getNode(Opc, VT, N0.getOperand(1), N1); + SDValue OpNode = DAG.getNode(Opc, VT, N0.getOperand(1), N1); AddToWorkList(OpNode.Val); return DAG.getNode(Opc, VT, OpNode, N0.getOperand(0)); } else if (N0.hasOneUse()) { - SDOperand OpNode = DAG.getNode(Opc, VT, N0.getOperand(0), N1); + SDValue OpNode = DAG.getNode(Opc, VT, N0.getOperand(0), N1); AddToWorkList(OpNode.Val); return DAG.getNode(Opc, VT, OpNode, N0.getOperand(1)); } @@ -486,20 +486,20 @@ SDOperand DAGCombiner::ReassociateOps(unsigned Opc, SDOperand N0, SDOperand N1){ // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2)) if (N1.getOpcode() == Opc && isa<ConstantSDNode>(N1.getOperand(1))) { if (isa<ConstantSDNode>(N0)) { - SDOperand OpNode = DAG.getNode(Opc, VT, N1.getOperand(1), N0); + SDValue OpNode = DAG.getNode(Opc, VT, N1.getOperand(1), N0); AddToWorkList(OpNode.Val); return DAG.getNode(Opc, VT, OpNode, N1.getOperand(0)); } else if (N1.hasOneUse()) { - SDOperand OpNode = DAG.getNode(Opc, VT, N1.getOperand(0), N0); + SDValue OpNode = DAG.getNode(Opc, VT, N1.getOperand(0), N0); AddToWorkList(OpNode.Val); return DAG.getNode(Opc, VT, OpNode, N1.getOperand(1)); } } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::CombineTo(SDNode *N, const SDOperand *To, unsigned NumTo, - bool AddTo) { +SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo, + bool AddTo) { assert(N->getNumValues() == NumTo && "Broken CombineTo call!"); ++NodesCombined; DOUT << "\nReplacing.1 "; DEBUG(N->dump(&DAG)); @@ -522,13 +522,13 @@ SDOperand DAGCombiner::CombineTo(SDNode *N, const SDOperand *To, unsigned NumTo, // Finally, since the node is now dead, remove it from the graph. DAG.DeleteNode(N); - return SDOperand(N, 0); + return SDValue(N, 0); } /// SimplifyDemandedBits - Check the specified integer node value to see if /// it can be simplified or if things it uses can be simplified by bit /// propagation. If so, return true. -bool DAGCombiner::SimplifyDemandedBits(SDOperand Op, const APInt &Demanded) { +bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) { TargetLowering::TargetLoweringOpt TLO(DAG, AfterLegalize); APInt KnownZero, KnownOne; if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) @@ -590,7 +590,7 @@ void DAGCombiner::Run(bool RunningAfterLegalize) { // The root of the dag may dangle to deleted nodes until the dag combiner is // done. Set it to null to avoid confusion. - DAG.setRoot(SDOperand()); + DAG.setRoot(SDValue()); // while the worklist isn't empty, inspect the node on the end of it and // try and combine it. 
@@ -609,7 +609,7 @@ void DAGCombiner::Run(bool RunningAfterLegalize) { continue; } - SDOperand RV = combine(N); + SDValue RV = combine(N); if (RV.Val == 0) continue; @@ -636,7 +636,7 @@ void DAGCombiner::Run(bool RunningAfterLegalize) { else { assert(N->getValueType(0) == RV.getValueType() && N->getNumValues() == 1 && "Type mismatch"); - SDOperand OpV = RV; + SDValue OpV = RV; DAG.ReplaceAllUsesWith(N, &OpV, &DeadNodes); } @@ -662,7 +662,7 @@ void DAGCombiner::Run(bool RunningAfterLegalize) { DAG.setRoot(Dummy.getValue()); } -SDOperand DAGCombiner::visit(SDNode *N) { +SDValue DAGCombiner::visit(SDNode *N) { switch(N->getOpcode()) { default: break; case ISD::TokenFactor: return visitTokenFactor(N); @@ -726,12 +726,12 @@ SDOperand DAGCombiner::visit(SDNode *N) { case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N); case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::combine(SDNode *N) { +SDValue DAGCombiner::combine(SDNode *N) { - SDOperand RV = visit(N); + SDValue RV = visit(N); // If nothing happened, try a target-specific DAG combine. if (RV.Val == 0) { @@ -754,15 +754,15 @@ SDOperand DAGCombiner::combine(SDNode *N) { if (RV.Val == 0 && SelectionDAG::isCommutativeBinOp(N->getOpcode()) && N->getNumValues() == 1) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); // Constant operands are canonicalized to RHS. if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) { - SDOperand Ops[] = { N1, N0 }; + SDValue Ops[] = { N1, N0 }; SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops, 2); if (CSENode) - return SDOperand(CSENode, 0); + return SDValue(CSENode, 0); } } @@ -771,7 +771,7 @@ SDOperand DAGCombiner::combine(SDNode *N) { /// getInputChainForNode - Given a node, return its input chain if it has one, /// otherwise return a null sd operand. -static SDOperand getInputChainForNode(SDNode *N) { +static SDValue getInputChainForNode(SDNode *N) { if (unsigned NumOps = N->getNumOperands()) { if (N->getOperand(0).getValueType() == MVT::Other) return N->getOperand(0); @@ -781,10 +781,10 @@ static SDOperand getInputChainForNode(SDNode *N) { if (N->getOperand(i).getValueType() == MVT::Other) return N->getOperand(i); } - return SDOperand(0, 0); + return SDValue(0, 0); } -SDOperand DAGCombiner::visitTokenFactor(SDNode *N) { +SDValue DAGCombiner::visitTokenFactor(SDNode *N) { // If N has two operands, where one has an input chain equal to the other, // the 'other' chain is redundant. if (N->getNumOperands() == 2) { @@ -795,7 +795,7 @@ SDOperand DAGCombiner::visitTokenFactor(SDNode *N) { } SmallVector<SDNode *, 8> TFs; // List of token factors to visit. - SmallVector<SDOperand, 8> Ops; // Ops for replacing token factor. + SmallVector<SDValue, 8> Ops; // Ops for replacing token factor. SmallPtrSet<SDNode*, 16> SeenOps; bool Changed = false; // If we should replace this token factor. @@ -809,7 +809,7 @@ SDOperand DAGCombiner::visitTokenFactor(SDNode *N) { // Check each of the operands. for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) { - SDOperand Op = TF->getOperand(i); + SDValue Op = TF->getOperand(i); switch (Op.getOpcode()) { case ISD::EntryToken: @@ -841,7 +841,7 @@ SDOperand DAGCombiner::visitTokenFactor(SDNode *N) { } } - SDOperand Result; + SDValue Result; // If we've change things around then replace token factor. 
if (Changed) { @@ -861,22 +861,22 @@ SDOperand DAGCombiner::visitTokenFactor(SDNode *N) { } /// MERGE_VALUES can always be eliminated. -SDOperand DAGCombiner::visitMERGE_VALUES(SDNode *N) { +SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) { WorkListRemover DeadNodes(*this); for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, i), N->getOperand(i), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i), &DeadNodes); removeFromWorkList(N); DAG.DeleteNode(N); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } static -SDOperand combineShlAddConstant(SDOperand N0, SDOperand N1, SelectionDAG &DAG) { +SDValue combineShlAddConstant(SDValue N0, SDValue N1, SelectionDAG &DAG) { MVT VT = N0.getValueType(); - SDOperand N00 = N0.getOperand(0); - SDOperand N01 = N0.getOperand(1); + SDValue N00 = N0.getOperand(0); + SDValue N01 = N0.getOperand(1); ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N01); if (N01C && N00.getOpcode() == ISD::ADD && N00.Val->hasOneUse() && isa<ConstantSDNode>(N00.getOperand(1))) { @@ -885,22 +885,22 @@ SDOperand combineShlAddConstant(SDOperand N0, SDOperand N1, SelectionDAG &DAG) { DAG.getNode(ISD::SHL, VT, N00.getOperand(1), N01)); return DAG.getNode(ISD::ADD, VT, N0, N1); } - return SDOperand(); + return SDValue(); } static -SDOperand combineSelectAndUse(SDNode *N, SDOperand Slct, SDOperand OtherOp, - SelectionDAG &DAG) { +SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, + SelectionDAG &DAG) { MVT VT = N->getValueType(0); unsigned Opc = N->getOpcode(); bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; - SDOperand LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); - SDOperand RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); + SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); + SDValue RHS = isSlctCC ? Slct.getOperand(3) : Slct.getOperand(2); ISD::CondCode CC = ISD::SETCC_INVALID; if (isSlctCC) CC = cast<CondCodeSDNode>(Slct.getOperand(4))->get(); else { - SDOperand CCOp = Slct.getOperand(0); + SDValue CCOp = Slct.getOperand(0); if (CCOp.getOpcode() == ISD::SETCC) CC = cast<CondCodeSDNode>(CCOp.getOperand(2))->get(); } @@ -916,7 +916,7 @@ SDOperand combineSelectAndUse(SDNode *N, SDOperand Slct, SDOperand OtherOp, RHS.getOpcode() == ISD::Constant && cast<ConstantSDNode>(RHS)->isNullValue()) { std::swap(LHS, RHS); - SDOperand Op0 = Slct.getOperand(0); + SDValue Op0 = Slct.getOperand(0); bool isInt = (isSlctCC ? 
Op0.getValueType() : Op0.getOperand(0).getValueType()).isInteger(); CC = ISD::getSetCCInverse(CC, isInt); @@ -925,29 +925,29 @@ SDOperand combineSelectAndUse(SDNode *N, SDOperand Slct, SDOperand OtherOp, } if (DoXform) { - SDOperand Result = DAG.getNode(Opc, VT, OtherOp, RHS); + SDValue Result = DAG.getNode(Opc, VT, OtherOp, RHS); if (isSlctCC) return DAG.getSelectCC(OtherOp, Result, Slct.getOperand(0), Slct.getOperand(1), CC); - SDOperand CCOp = Slct.getOperand(0); + SDValue CCOp = Slct.getOperand(0); if (InvCC) CCOp = DAG.getSetCC(CCOp.getValueType(), CCOp.getOperand(0), CCOp.getOperand(1), CC); return DAG.getNode(ISD::SELECT, VT, CCOp, OtherOp, Result); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitADD(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitADD(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N0.getValueType(); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -973,7 +973,7 @@ SDOperand DAGCombiner::visitADD(SDNode *N) { N0C->getAPIntValue(), VT), N0.getOperand(1)); // reassociate add - SDOperand RADD = ReassociateOps(ISD::ADD, N0, N1); + SDValue RADD = ReassociateOps(ISD::ADD, N0, N1); if (RADD.Val != 0) return RADD; // fold ((0-A) + B) -> B-A @@ -988,8 +988,8 @@ SDOperand DAGCombiner::visitADD(SDNode *N) { if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1)) return N1.getOperand(0); - if (!VT.isVector() && SimplifyDemandedBits(SDOperand(N, 0))) - return SDOperand(N, 0); + if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); // fold (a+b) -> (a|b) iff a and b share no bits. 
if (VT.isInteger() && !VT.isVector()) { @@ -1010,30 +1010,30 @@ SDOperand DAGCombiner::visitADD(SDNode *N) { // fold (add (shl (add x, c1), c2), ) -> (add (add (shl x, c2), c1<<c2), ) if (N0.getOpcode() == ISD::SHL && N0.Val->hasOneUse()) { - SDOperand Result = combineShlAddConstant(N0, N1, DAG); + SDValue Result = combineShlAddConstant(N0, N1, DAG); if (Result.Val) return Result; } if (N1.getOpcode() == ISD::SHL && N1.Val->hasOneUse()) { - SDOperand Result = combineShlAddConstant(N1, N0, DAG); + SDValue Result = combineShlAddConstant(N1, N0, DAG); if (Result.Val) return Result; } // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) if (N0.getOpcode() == ISD::SELECT && N0.Val->hasOneUse()) { - SDOperand Result = combineSelectAndUse(N, N0, N1, DAG); + SDValue Result = combineSelectAndUse(N, N0, N1, DAG); if (Result.Val) return Result; } if (N1.getOpcode() == ISD::SELECT && N1.Val->hasOneUse()) { - SDOperand Result = combineSelectAndUse(N, N1, N0, DAG); + SDValue Result = combineSelectAndUse(N, N1, N0, DAG); if (Result.Val) return Result; } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitADDC(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitADDC(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N0.getValueType(); @@ -1067,13 +1067,13 @@ SDOperand DAGCombiner::visitADDC(SDNode *N) { DAG.getNode(ISD::CARRY_FALSE, MVT::Flag)); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitADDE(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); - SDOperand CarryIn = N->getOperand(2); +SDValue DAGCombiner::visitADDE(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue CarryIn = N->getOperand(2); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); //MVT VT = N0.getValueType(); @@ -1086,21 +1086,21 @@ SDOperand DAGCombiner::visitADDE(SDNode *N) { if (CarryIn.getOpcode() == ISD::CARRY_FALSE) return DAG.getNode(ISD::ADDC, N->getVTList(), N1, N0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSUB(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitSUB(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); MVT VT = N0.getValueType(); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1122,7 +1122,7 @@ SDOperand DAGCombiner::visitSUB(SDNode *N) { return N0.getOperand(0); // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) if (N1.getOpcode() == ISD::SELECT && N1.Val->hasOneUse()) { - SDOperand Result = combineSelectAndUse(N, N1, N0, DAG); + SDValue Result = combineSelectAndUse(N, N1, N0, DAG); if (Result.Val) return Result; } // If either operand of a sub is undef, the result is undef @@ -1131,19 +1131,19 @@ SDOperand DAGCombiner::visitSUB(SDNode *N) { if (N1.getOpcode() == ISD::UNDEF) return N1; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitMUL(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitMUL(SDNode *N) 
{ + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N0.getValueType(); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1180,7 +1180,7 @@ SDOperand DAGCombiner::visitMUL(SDNode *N) { // (mul (shl X, c1), c2) -> (mul X, c2 << c1) if (N1C && N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1))) { - SDOperand C3 = DAG.getNode(ISD::SHL, VT, N1, N0.getOperand(1)); + SDValue C3 = DAG.getNode(ISD::SHL, VT, N1, N0.getOperand(1)); AddToWorkList(C3.Val); return DAG.getNode(ISD::MUL, VT, N0.getOperand(0), C3); } @@ -1188,7 +1188,7 @@ SDOperand DAGCombiner::visitMUL(SDNode *N) { // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one // use. { - SDOperand Sh(0,0), Y(0,0); + SDValue Sh(0,0), Y(0,0); // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)). if (N0.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N0.getOperand(1)) && N0.Val->hasOneUse()) { @@ -1198,7 +1198,7 @@ SDOperand DAGCombiner::visitMUL(SDNode *N) { Sh = N1; Y = N0; } if (Sh.Val) { - SDOperand Mul = DAG.getNode(ISD::MUL, VT, Sh.getOperand(0), Y); + SDValue Mul = DAG.getNode(ISD::MUL, VT, Sh.getOperand(0), Y); return DAG.getNode(ISD::SHL, VT, Mul, Sh.getOperand(1)); } } @@ -1211,23 +1211,23 @@ SDOperand DAGCombiner::visitMUL(SDNode *N) { } // reassociate mul - SDOperand RMUL = ReassociateOps(ISD::MUL, N0, N1); + SDValue RMUL = ReassociateOps(ISD::MUL, N0, N1); if (RMUL.Val != 0) return RMUL; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSDIV(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitSDIV(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); MVT VT = N->getValueType(0); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1253,23 +1253,23 @@ SDOperand DAGCombiner::visitSDIV(SDNode *N) { // If dividing by powers of two is cheap, then don't perform the following // fold. if (TLI.isPow2DivCheap()) - return SDOperand(); + return SDValue(); int64_t pow2 = N1C->getSignExtended(); int64_t abs2 = pow2 > 0 ? pow2 : -pow2; unsigned lg2 = Log2_64(abs2); // Splat the sign bit into the register - SDOperand SGN = DAG.getNode(ISD::SRA, VT, N0, + SDValue SGN = DAG.getNode(ISD::SRA, VT, N0, DAG.getConstant(VT.getSizeInBits()-1, TLI.getShiftAmountTy())); AddToWorkList(SGN.Val); // Add (N0 < 0) ? abs2 - 1 : 0; - SDOperand SRL = DAG.getNode(ISD::SRL, VT, SGN, + SDValue SRL = DAG.getNode(ISD::SRL, VT, SGN, DAG.getConstant(VT.getSizeInBits()-lg2, TLI.getShiftAmountTy())); - SDOperand ADD = DAG.getNode(ISD::ADD, VT, N0, SRL); + SDValue ADD = DAG.getNode(ISD::ADD, VT, N0, SRL); AddToWorkList(SRL.Val); AddToWorkList(ADD.Val); // Divide by pow2 - SDOperand SRA = DAG.getNode(ISD::SRA, VT, ADD, + SDValue SRA = DAG.getNode(ISD::SRA, VT, ADD, DAG.getConstant(lg2, TLI.getShiftAmountTy())); // If we're dividing by a positive value, we're done. Otherwise, we must // negate the result. @@ -1282,7 +1282,7 @@ SDOperand DAGCombiner::visitSDIV(SDNode *N) { // alternate sequence. 
if (N1C && (N1C->getSignExtended() < -1 || N1C->getSignExtended() > 1) && !TLI.isIntDivCheap()) { - SDOperand Op = BuildSDIV(N); + SDValue Op = BuildSDIV(N); if (Op.Val) return Op; } @@ -1293,19 +1293,19 @@ SDOperand DAGCombiner::visitSDIV(SDNode *N) { if (N1.getOpcode() == ISD::UNDEF) return N1; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitUDIV(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitUDIV(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.Val); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); MVT VT = N->getValueType(0); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1322,7 +1322,7 @@ SDOperand DAGCombiner::visitUDIV(SDNode *N) { if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) { if (SHC->getAPIntValue().isPowerOf2()) { MVT ADDVT = N1.getOperand(1).getValueType(); - SDOperand Add = DAG.getNode(ISD::ADD, ADDVT, N1.getOperand(1), + SDValue Add = DAG.getNode(ISD::ADD, ADDVT, N1.getOperand(1), DAG.getConstant(SHC->getAPIntValue() .logBase2(), ADDVT)); @@ -1333,7 +1333,7 @@ SDOperand DAGCombiner::visitUDIV(SDNode *N) { } // fold (udiv x, c) -> alternate if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap()) { - SDOperand Op = BuildUDIV(N); + SDValue Op = BuildUDIV(N); if (Op.Val) return Op; } @@ -1344,12 +1344,12 @@ SDOperand DAGCombiner::visitUDIV(SDNode *N) { if (N1.getOpcode() == ISD::UNDEF) return N1; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSREM(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitSREM(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N->getValueType(0); @@ -1367,12 +1367,12 @@ SDOperand DAGCombiner::visitSREM(SDNode *N) { // If X/C can be simplified by the division-by-constant logic, lower // X%C to the equivalent of X-X/C*C. 
if (N1C && !N1C->isNullValue()) { - SDOperand Div = DAG.getNode(ISD::SDIV, VT, N0, N1); + SDValue Div = DAG.getNode(ISD::SDIV, VT, N0, N1); AddToWorkList(Div.Val); - SDOperand OptimizedDiv = combine(Div.Val); + SDValue OptimizedDiv = combine(Div.Val); if (OptimizedDiv.Val && OptimizedDiv.Val != Div.Val) { - SDOperand Mul = DAG.getNode(ISD::MUL, VT, OptimizedDiv, N1); - SDOperand Sub = DAG.getNode(ISD::SUB, VT, N0, Mul); + SDValue Mul = DAG.getNode(ISD::MUL, VT, OptimizedDiv, N1); + SDValue Sub = DAG.getNode(ISD::SUB, VT, N0, Mul); AddToWorkList(Mul.Val); return Sub; } @@ -1385,12 +1385,12 @@ SDOperand DAGCombiner::visitSREM(SDNode *N) { if (N1.getOpcode() == ISD::UNDEF) return N1; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitUREM(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitUREM(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N->getValueType(0); @@ -1406,7 +1406,7 @@ SDOperand DAGCombiner::visitUREM(SDNode *N) { if (N1.getOpcode() == ISD::SHL) { if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) { if (SHC->getAPIntValue().isPowerOf2()) { - SDOperand Add = + SDValue Add = DAG.getNode(ISD::ADD, VT, N1, DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT)); @@ -1419,11 +1419,11 @@ SDOperand DAGCombiner::visitUREM(SDNode *N) { // If X/C can be simplified by the division-by-constant logic, lower // X%C to the equivalent of X-X/C*C. if (N1C && !N1C->isNullValue()) { - SDOperand Div = DAG.getNode(ISD::UDIV, VT, N0, N1); - SDOperand OptimizedDiv = combine(Div.Val); + SDValue Div = DAG.getNode(ISD::UDIV, VT, N0, N1); + SDValue OptimizedDiv = combine(Div.Val); if (OptimizedDiv.Val && OptimizedDiv.Val != Div.Val) { - SDOperand Mul = DAG.getNode(ISD::MUL, VT, OptimizedDiv, N1); - SDOperand Sub = DAG.getNode(ISD::SUB, VT, N0, Mul); + SDValue Mul = DAG.getNode(ISD::MUL, VT, OptimizedDiv, N1); + SDValue Sub = DAG.getNode(ISD::SUB, VT, N0, Mul); AddToWorkList(Mul.Val); return Sub; } @@ -1436,12 +1436,12 @@ SDOperand DAGCombiner::visitUREM(SDNode *N) { if (N1.getOpcode() == ISD::UNDEF) return N1; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitMULHS(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitMULHS(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N->getValueType(0); @@ -1457,12 +1457,12 @@ SDOperand DAGCombiner::visitMULHS(SDNode *N) { if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) return DAG.getConstant(0, VT); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitMULHU(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitMULHU(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N->getValueType(0); @@ -1476,21 +1476,21 @@ SDOperand DAGCombiner::visitMULHU(SDNode *N) { if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF) return DAG.getConstant(0, VT); - return SDOperand(); + return SDValue(); } /// SimplifyNodeWithTwoResults - Perform optimizations common to nodes that /// compute two values. 
LoOp and HiOp give the opcodes for the two computations /// that are being performed. Return true if a simplification was made. /// -SDOperand DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, - unsigned HiOp) { +SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, + unsigned HiOp) { // If the high half is not needed, just compute the low half. bool HiExists = N->hasAnyUseOfValue(1); if (!HiExists && (!AfterLegalize || TLI.isOperationLegal(LoOp, N->getValueType(0)))) { - SDOperand Res = DAG.getNode(LoOp, N->getValueType(0), N->op_begin(), + SDValue Res = DAG.getNode(LoOp, N->getValueType(0), N->op_begin(), N->getNumOperands()); return CombineTo(N, Res, Res); } @@ -1500,21 +1500,21 @@ SDOperand DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, if (!LoExists && (!AfterLegalize || TLI.isOperationLegal(HiOp, N->getValueType(1)))) { - SDOperand Res = DAG.getNode(HiOp, N->getValueType(1), N->op_begin(), + SDValue Res = DAG.getNode(HiOp, N->getValueType(1), N->op_begin(), N->getNumOperands()); return CombineTo(N, Res, Res); } // If both halves are used, return as it is. if (LoExists && HiExists) - return SDOperand(); + return SDValue(); // If the two computed results can be simplified separately, separate them. if (LoExists) { - SDOperand Lo = DAG.getNode(LoOp, N->getValueType(0), + SDValue Lo = DAG.getNode(LoOp, N->getValueType(0), N->op_begin(), N->getNumOperands()); AddToWorkList(Lo.Val); - SDOperand LoOpt = combine(Lo.Val); + SDValue LoOpt = combine(Lo.Val); if (LoOpt.Val && LoOpt.Val != Lo.Val && (!AfterLegalize || TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType()))) @@ -1522,50 +1522,50 @@ SDOperand DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, } if (HiExists) { - SDOperand Hi = DAG.getNode(HiOp, N->getValueType(1), + SDValue Hi = DAG.getNode(HiOp, N->getValueType(1), N->op_begin(), N->getNumOperands()); AddToWorkList(Hi.Val); - SDOperand HiOpt = combine(Hi.Val); + SDValue HiOpt = combine(Hi.Val); if (HiOpt.Val && HiOpt != Hi && (!AfterLegalize || TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType()))) return CombineTo(N, HiOpt, HiOpt); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSMUL_LOHI(SDNode *N) { - SDOperand Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS); +SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) { + SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS); if (Res.Val) return Res; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitUMUL_LOHI(SDNode *N) { - SDOperand Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU); +SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) { + SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU); if (Res.Val) return Res; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSDIVREM(SDNode *N) { - SDOperand Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM); +SDValue DAGCombiner::visitSDIVREM(SDNode *N) { + SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM); if (Res.Val) return Res; - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitUDIVREM(SDNode *N) { - SDOperand Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM); +SDValue DAGCombiner::visitUDIVREM(SDNode *N) { + SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM); if (Res.Val) return Res; - return SDOperand(); + return SDValue(); } /// SimplifyBinOpWithSameOpcodeHands - If this is a binary operator with /// two operands of the same 
opcode, try to simplify it. -SDOperand DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { - SDOperand N0 = N->getOperand(0), N1 = N->getOperand(1); +SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { + SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); MVT VT = N0.getValueType(); assert(N0.getOpcode() == N1.getOpcode() && "Bad input!"); @@ -1577,7 +1577,7 @@ SDOperand DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { if ((N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND|| N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::TRUNCATE) && N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) { - SDOperand ORNode = DAG.getNode(N->getOpcode(), + SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getOperand(0).getValueType(), N0.getOperand(0), N1.getOperand(0)); AddToWorkList(ORNode.Val); @@ -1591,20 +1591,20 @@ SDOperand DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) && N0.getOperand(1) == N1.getOperand(1)) { - SDOperand ORNode = DAG.getNode(N->getOpcode(), + SDValue ORNode = DAG.getNode(N->getOpcode(), N0.getOperand(0).getValueType(), N0.getOperand(0), N1.getOperand(0)); AddToWorkList(ORNode.Val); return DAG.getNode(N0.getOpcode(), VT, ORNode, N0.getOperand(1)); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitAND(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); - SDOperand LL, LR, RL, RR, CC0, CC1; +SDValue DAGCombiner::visitAND(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue LL, LR, RL, RR, CC0, CC1; ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N1.getValueType(); @@ -1612,7 +1612,7 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1629,11 +1629,11 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { if (N1C && N1C->isAllOnesValue()) return N0; // if (and x, c) is known to be zero, return 0 - if (N1C && DAG.MaskedValueIsZero(SDOperand(N, 0), + if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnesValue(BitWidth))) return DAG.getConstant(0, VT); // reassociate and - SDOperand RAND = ReassociateOps(ISD::AND, N0, N1); + SDValue RAND = ReassociateOps(ISD::AND, N0, N1); if (RAND.Val != 0) return RAND; // fold (and (or x, 0xFFFF), 0xFF) -> 0xFF @@ -1643,11 +1643,11 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { return N1; // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { - SDOperand N0Op0 = N0.getOperand(0); + SDValue N0Op0 = N0.getOperand(0); APInt Mask = ~N1C->getAPIntValue(); Mask.trunc(N0Op0.getValueSizeInBits()); if (DAG.MaskedValueIsZero(N0Op0, Mask)) { - SDOperand Zext = DAG.getNode(ISD::ZERO_EXTEND, N0.getValueType(), + SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, N0.getValueType(), N0Op0); // Replace uses of the AND with uses of the Zero extend node. @@ -1657,7 +1657,7 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { // zero_extend, to avoid duplicating things. This will later cause this // AND to be folded. CombineTo(N0.Val, Zext); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! 
} } // fold (and (setcc x), (setcc y)) -> (setcc (and x, y)) @@ -1669,19 +1669,19 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { LL.getValueType().isInteger()) { // fold (X == 0) & (Y == 0) -> (X|Y == 0) if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) { - SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL); + SDValue ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL); AddToWorkList(ORNode.Val); return DAG.getSetCC(VT, ORNode, LR, Op1); } // fold (X == -1) & (Y == -1) -> (X&Y == -1) if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) { - SDOperand ANDNode = DAG.getNode(ISD::AND, LR.getValueType(), LL, RL); + SDValue ANDNode = DAG.getNode(ISD::AND, LR.getValueType(), LL, RL); AddToWorkList(ANDNode.Val); return DAG.getSetCC(VT, ANDNode, LR, Op1); } // fold (X > -1) & (Y > -1) -> (X|Y > -1) if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) { - SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL); + SDValue ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL); AddToWorkList(ORNode.Val); return DAG.getSetCC(VT, ORNode, LR, Op1); } @@ -1701,15 +1701,15 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { // Simplify: and (op x...), (op y...) -> (op (and x, y)) if (N0.getOpcode() == N1.getOpcode()) { - SDOperand Tmp = SimplifyBinOpWithSameOpcodeHands(N); + SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N); if (Tmp.Val) return Tmp; } // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) // fold (and (sra)) -> (and (srl)) when possible. if (!VT.isVector() && - SimplifyDemandedBits(SDOperand(N, 0))) - return SDOperand(N, 0); + SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); // fold (zext_inreg (extload x)) -> (zextload x) if (ISD::isEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val)) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); @@ -1721,14 +1721,14 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { BitWidth - EVT.getSizeInBits())) && ((!AfterLegalize && !LN0->isVolatile()) || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) { - SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, LN0->isVolatile(), LN0->getAlignment()); AddToWorkList(N); CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } } // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use @@ -1743,14 +1743,14 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { BitWidth - EVT.getSizeInBits())) && ((!AfterLegalize && !LN0->isVolatile()) || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) { - SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, LN0->isVolatile(), LN0->getAlignment()); AddToWorkList(N); CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! 
} } @@ -1780,38 +1780,38 @@ SDOperand DAGCombiner::visitAND(SDNode *N) { unsigned EVTStoreBytes = EVT.getStoreSizeInBits()/8; unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; unsigned Alignment = LN0->getAlignment(); - SDOperand NewPtr = LN0->getBasePtr(); + SDValue NewPtr = LN0->getBasePtr(); if (TLI.isBigEndian()) { NewPtr = DAG.getNode(ISD::ADD, PtrType, NewPtr, DAG.getConstant(PtrOff, PtrType)); Alignment = MinAlign(Alignment, PtrOff); } AddToWorkList(NewPtr.Val); - SDOperand Load = + SDValue Load = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), NewPtr, LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, LN0->isVolatile(), Alignment); AddToWorkList(N); CombineTo(N0.Val, Load, Load.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } } } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitOR(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); - SDOperand LL, LR, RL, RR, CC0, CC1; +SDValue DAGCombiner::visitOR(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue LL, LR, RL, RR, CC0, CC1; ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N1.getValueType(); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -1834,7 +1834,7 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue())) return N1; // reassociate or - SDOperand ROR = ReassociateOps(ISD::OR, N0, N1); + SDValue ROR = ReassociateOps(ISD::OR, N0, N1); if (ROR.Val != 0) return ROR; // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2) @@ -1857,7 +1857,7 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { // fold (X < 0) | (Y < 0) -> (X|Y < 0) if (cast<ConstantSDNode>(LR)->isNullValue() && (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) { - SDOperand ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL); + SDValue ORNode = DAG.getNode(ISD::OR, LR.getValueType(), LL, RL); AddToWorkList(ORNode.Val); return DAG.getSetCC(VT, ORNode, LR, Op1); } @@ -1865,7 +1865,7 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { // fold (X > -1) | (Y > -1) -> (X&Y > -1) if (cast<ConstantSDNode>(LR)->isAllOnesValue() && (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) { - SDOperand ANDNode = DAG.getNode(ISD::AND, LR.getValueType(), LL, RL); + SDValue ANDNode = DAG.getNode(ISD::AND, LR.getValueType(), LL, RL); AddToWorkList(ANDNode.Val); return DAG.getSetCC(VT, ANDNode, LR, Op1); } @@ -1885,7 +1885,7 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { // Simplify: or (op x...), (op y...) -> (op (or x, y)) if (N0.getOpcode() == N1.getOpcode()) { - SDOperand Tmp = SimplifyBinOpWithSameOpcodeHands(N); + SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N); if (Tmp.Val) return Tmp; } @@ -1905,7 +1905,7 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) && DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) { - SDOperand X =DAG.getNode(ISD::OR, VT, N0.getOperand(0), N1.getOperand(0)); + SDValue X =DAG.getNode(ISD::OR, VT, N0.getOperand(0), N1.getOperand(0)); return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(LHSMask|RHSMask, VT)); } } @@ -1913,14 +1913,14 @@ SDOperand DAGCombiner::visitOR(SDNode *N) { // See if this is some rotate idiom. 
if (SDNode *Rot = MatchRotate(N0, N1)) - return SDOperand(Rot, 0); + return SDValue(Rot, 0); - return SDOperand(); + return SDValue(); } /// MatchRotateHalf - Match "(X shl/srl V1) & V2" where V2 may not be present. -static bool MatchRotateHalf(SDOperand Op, SDOperand &Shift, SDOperand &Mask) { +static bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) { if (Op.getOpcode() == ISD::AND) { if (isa<ConstantSDNode>(Op.getOperand(1))) { Mask = Op.getOperand(1); @@ -1941,7 +1941,7 @@ static bool MatchRotateHalf(SDOperand Op, SDOperand &Shift, SDOperand &Mask) { // MatchRotate - Handle an 'or' of two operands. If this is one of the many // idioms for rotate, and if the target supports rotation instructions, generate // a rot[lr]. -SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { +SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS) { // Must be a legal type. Expanded 'n promoted things won't work with rotates. MVT VT = LHS.getValueType(); if (!TLI.isTypeLegal(VT)) return 0; @@ -1952,13 +1952,13 @@ SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { if (!HasROTL && !HasROTR) return 0; // Match "(X shl/srl V1) & V2" where V2 may not be present. - SDOperand LHSShift; // The shift. - SDOperand LHSMask; // AND value if any. + SDValue LHSShift; // The shift. + SDValue LHSMask; // AND value if any. if (!MatchRotateHalf(LHS, LHSShift, LHSMask)) return 0; // Not part of a rotate. - SDOperand RHSShift; // The shift. - SDOperand RHSMask; // AND value if any. + SDValue RHSShift; // The shift. + SDValue RHSMask; // AND value if any. if (!MatchRotateHalf(RHS, RHSShift, RHSMask)) return 0; // Not part of a rotate. @@ -1976,9 +1976,9 @@ SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { } unsigned OpSizeInBits = VT.getSizeInBits(); - SDOperand LHSShiftArg = LHSShift.getOperand(0); - SDOperand LHSShiftAmt = LHSShift.getOperand(1); - SDOperand RHSShiftAmt = RHSShift.getOperand(1); + SDValue LHSShiftArg = LHSShift.getOperand(0); + SDValue LHSShiftAmt = LHSShift.getOperand(1); + SDValue RHSShiftAmt = RHSShift.getOperand(1); // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1) // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2) @@ -1989,7 +1989,7 @@ SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { if ((LShVal + RShVal) != OpSizeInBits) return 0; - SDOperand Rot; + SDValue Rot; if (HasROTL) Rot = DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt); else @@ -2056,8 +2056,8 @@ SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND)) { - SDOperand LExtOp0 = LHSShiftAmt.getOperand(0); - SDOperand RExtOp0 = RHSShiftAmt.getOperand(0); + SDValue LExtOp0 = LHSShiftAmt.getOperand(0); + SDValue RExtOp0 = RHSShiftAmt.getOperand(0); if (RExtOp0.getOpcode() == ISD::SUB && RExtOp0.getOperand(1) == LExtOp0) { // fold (or (shl x, (*ext y)), (srl x, (*ext (sub 32, y)))) -> @@ -2093,17 +2093,17 @@ SDNode *DAGCombiner::MatchRotate(SDOperand LHS, SDOperand RHS) { } -SDOperand DAGCombiner::visitXOR(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); - SDOperand LHS, RHS, CC; +SDValue DAGCombiner::visitXOR(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue LHS, RHS, CC; ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N0.getValueType(); // fold vector ops if (VT.isVector()) { - 
SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -2125,7 +2125,7 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { if (N1C && N1C->isNullValue()) return N0; // reassociate xor - SDOperand RXOR = ReassociateOps(ISD::XOR, N0, N1); + SDValue RXOR = ReassociateOps(ISD::XOR, N0, N1); if (RXOR.Val != 0) return RXOR; // fold !(x cc y) -> (x !cc y) @@ -2143,7 +2143,7 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y))) if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND && N0.Val->hasOneUse() && isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){ - SDOperand V = N0.getOperand(0); + SDValue V = N0.getOperand(0); V = DAG.getNode(ISD::XOR, V.getValueType(), V, DAG.getConstant(1, V.getValueType())); AddToWorkList(V.Val); @@ -2153,7 +2153,7 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { // fold !(x or y) -> (!x and !y) iff x or y are setcc if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 && (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { - SDOperand LHS = N0.getOperand(0), RHS = N0.getOperand(1); + SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) { unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND; LHS = DAG.getNode(ISD::XOR, VT, LHS, N1); // RHS = ~LHS @@ -2165,7 +2165,7 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { // fold !(x or y) -> (!x and !y) iff x or y are constants if (N1C && N1C->isAllOnesValue() && (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { - SDOperand LHS = N0.getOperand(0), RHS = N0.getOperand(1); + SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) { unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND; LHS = DAG.getNode(ISD::XOR, VT, LHS, N1); // RHS = ~LHS @@ -2193,31 +2193,31 @@ SDOperand DAGCombiner::visitXOR(SDNode *N) { return DAG.getConstant(0, VT); } else if (!AfterLegalize || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) { // Produce a vector of zeros. - SDOperand El = DAG.getConstant(0, VT.getVectorElementType()); - std::vector<SDOperand> Ops(VT.getVectorNumElements(), El); + SDValue El = DAG.getConstant(0, VT.getVectorElementType()); + std::vector<SDValue> Ops(VT.getVectorNumElements(), El); return DAG.getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()); } } // Simplify: xor (op x...), (op y...) -> (op (xor x, y)) if (N0.getOpcode() == N1.getOpcode()) { - SDOperand Tmp = SimplifyBinOpWithSameOpcodeHands(N); + SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N); if (Tmp.Val) return Tmp; } // Simplify the expression using non-local knowledge. if (!VT.isVector() && - SimplifyDemandedBits(SDOperand(N, 0))) - return SDOperand(N, 0); + SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); - return SDOperand(); + return SDValue(); } /// visitShiftByConstant - Handle transforms common to the three shifts, when /// the shift amount is a constant. -SDOperand DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { +SDValue DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { SDNode *LHS = N->getOperand(0).Val; - if (!LHS->hasOneUse()) return SDOperand(); + if (!LHS->hasOneUse()) return SDValue(); // We want to pull some binops through shifts, so that we have (and (shift)) // instead of (shift (and)), likewise for add, or, xor, etc. 
This sort of @@ -2226,7 +2226,7 @@ SDOperand DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { bool HighBitSet = false; // Can we transform this if the high bit is set? switch (LHS->getOpcode()) { - default: return SDOperand(); + default: return SDValue(); case ISD::OR: case ISD::XOR: HighBitSet = false; // We can only transform sra if the high bit is clear. @@ -2236,14 +2236,14 @@ SDOperand DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { break; case ISD::ADD: if (N->getOpcode() != ISD::SHL) - return SDOperand(); // only shl(add) not sr[al](add). + return SDValue(); // only shl(add) not sr[al](add). HighBitSet = false; // We can only transform sra if the high bit is clear. break; } // We require the RHS of the binop to be a constant as well. ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1)); - if (!BinOpCst) return SDOperand(); + if (!BinOpCst) return SDValue(); // FIXME: disable this for unless the input to the binop is a shift by a @@ -2256,7 +2256,7 @@ SDOperand DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { BinOpLHSVal->getOpcode() != ISD::SRA && BinOpLHSVal->getOpcode() != ISD::SRL) || !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1))) - return SDOperand(); + return SDValue(); MVT VT = N->getValueType(0); @@ -2268,15 +2268,15 @@ SDOperand DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { if (N->getOpcode() == ISD::SRA) { bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative(); if (BinOpRHSSignSet != HighBitSet) - return SDOperand(); + return SDValue(); } // Fold the constants, shifting the binop RHS by the shift amount. - SDOperand NewRHS = DAG.getNode(N->getOpcode(), N->getValueType(0), + SDValue NewRHS = DAG.getNode(N->getOpcode(), N->getValueType(0), LHS->getOperand(1), N->getOperand(1)); // Create the new shift. - SDOperand NewShift = DAG.getNode(N->getOpcode(), VT, LHS->getOperand(0), + SDValue NewShift = DAG.getNode(N->getOpcode(), VT, LHS->getOperand(0), N->getOperand(1)); // Create the new binop. 
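visitShiftByConstant pulls a one-use binop through a constant shift so the result looks like (binop (shift x, c2), (shift c1, c2)); the rewrite is sound because shifts distribute over these binops. A small C++ check of the identities involved, under 32-bit unsigned (wrapping) arithmetic, as an illustrative sketch rather than LLVM code:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xDEADBEEFu, c1 = 0x0F0Fu;
  unsigned  c2 = 5;
  // shl distributes over add in modular arithmetic (the shl(add) case):
  assert(((x + c1) << c2) == ((x << c2) + (c1 << c2)));
  // or/xor/and distribute over any shift (here srl):
  assert(((x | c1) >> c2) == ((x >> c2) | (c1 >> c2)));
  assert(((x ^ c1) >> c2) == ((x >> c2) ^ (c1 >> c2)));
}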
@@ -2284,9 +2284,9 @@ SDOperand DAGCombiner::visitShiftByConstant(SDNode *N, unsigned Amt) { } -SDOperand DAGCombiner::visitSHL(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitSHL(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N0.getValueType(); @@ -2305,11 +2305,11 @@ SDOperand DAGCombiner::visitSHL(SDNode *N) { if (N1C && N1C->isNullValue()) return N0; // if (shl x, c) is known to be zero, return 0 - if (DAG.MaskedValueIsZero(SDOperand(N, 0), + if (DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()))) return DAG.getConstant(0, VT); - if (N1C && SimplifyDemandedBits(SDOperand(N, 0))) - return SDOperand(N, 0); + if (N1C && SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); // fold (shl (shl x, c1), c2) -> 0 or (shl x, c1+c2) if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1).getOpcode() == ISD::Constant) { @@ -2326,7 +2326,7 @@ SDOperand DAGCombiner::visitSHL(SDNode *N) { N0.getOperand(1).getOpcode() == ISD::Constant) { uint64_t c1 = cast<ConstantSDNode>(N0.getOperand(1))->getValue(); uint64_t c2 = N1C->getValue(); - SDOperand Mask = DAG.getNode(ISD::AND, VT, N0.getOperand(0), + SDValue Mask = DAG.getNode(ISD::AND, VT, N0.getOperand(0), DAG.getConstant(~0ULL << c1, VT)); if (c2 > c1) return DAG.getNode(ISD::SHL, VT, Mask, @@ -2340,12 +2340,12 @@ SDOperand DAGCombiner::visitSHL(SDNode *N) { return DAG.getNode(ISD::AND, VT, N0.getOperand(0), DAG.getConstant(~0ULL << N1C->getValue(), VT)); - return N1C ? visitShiftByConstant(N, N1C->getValue()) : SDOperand(); + return N1C ? visitShiftByConstant(N, N1C->getValue()) : SDValue(); } -SDOperand DAGCombiner::visitSRA(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitSRA(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N0.getValueType(); @@ -2411,29 +2411,29 @@ SDOperand DAGCombiner::visitSRA(SDNode *N) { TLI.isOperationLegal(ISD::TRUNCATE, VT) && TLI.isTruncateFree(VT, TruncVT)) { - SDOperand Amt = DAG.getConstant(ShiftAmt, TLI.getShiftAmountTy()); - SDOperand Shift = DAG.getNode(ISD::SRL, VT, N0.getOperand(0), Amt); - SDOperand Trunc = DAG.getNode(ISD::TRUNCATE, TruncVT, Shift); + SDValue Amt = DAG.getConstant(ShiftAmt, TLI.getShiftAmountTy()); + SDValue Shift = DAG.getNode(ISD::SRL, VT, N0.getOperand(0), Amt); + SDValue Trunc = DAG.getNode(ISD::TRUNCATE, TruncVT, Shift); return DAG.getNode(ISD::SIGN_EXTEND, N->getValueType(0), Trunc); } } } // Simplify, based on bits shifted out of the LHS. - if (N1C && SimplifyDemandedBits(SDOperand(N, 0))) - return SDOperand(N, 0); + if (N1C && SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); // If the sign bit is known to be zero, switch this to a SRL. if (DAG.SignBitIsZero(N0)) return DAG.getNode(ISD::SRL, VT, N0, N1); - return N1C ? visitShiftByConstant(N, N1C->getValue()) : SDOperand(); + return N1C ? 
visitShiftByConstant(N, N1C->getValue()) : SDValue(); } -SDOperand DAGCombiner::visitSRL(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitSRL(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); MVT VT = N0.getValueType(); @@ -2452,7 +2452,7 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { if (N1C && N1C->isNullValue()) return N0; // if (srl x, c) is known to be zero, return 0 - if (N1C && DAG.MaskedValueIsZero(SDOperand(N, 0), + if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), APInt::getAllOnesValue(OpSizeInBits))) return DAG.getConstant(0, VT); @@ -2474,7 +2474,7 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { if (N1C->getValue() >= SmallVT.getSizeInBits()) return DAG.getNode(ISD::UNDEF, VT); - SDOperand SmallShift = DAG.getNode(ISD::SRL, SmallVT, N0.getOperand(0), N1); + SDValue SmallShift = DAG.getNode(ISD::SRL, SmallVT, N0.getOperand(0), N1); AddToWorkList(SmallShift.Val); return DAG.getNode(ISD::ANY_EXTEND, VT, SmallShift); } @@ -2509,7 +2509,7 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { // will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair // to an SRL,XOR pair, which is likely to simplify more. unsigned ShAmt = UnknownBits.countTrailingZeros(); - SDOperand Op = N0.getOperand(0); + SDValue Op = N0.getOperand(0); if (ShAmt) { Op = DAG.getNode(ISD::SRL, VT, Op, DAG.getConstant(ShAmt, TLI.getShiftAmountTy())); @@ -2521,46 +2521,46 @@ SDOperand DAGCombiner::visitSRL(SDNode *N) { // fold operands of srl based on knowledge that the low bits are not // demanded. - if (N1C && SimplifyDemandedBits(SDOperand(N, 0))) - return SDOperand(N, 0); + if (N1C && SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); - return N1C ? visitShiftByConstant(N, N1C->getValue()) : SDOperand(); + return N1C ? 
visitShiftByConstant(N, N1C->getValue()) : SDValue(); } -SDOperand DAGCombiner::visitCTLZ(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitCTLZ(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // fold (ctlz c1) -> c2 if (isa<ConstantSDNode>(N0)) return DAG.getNode(ISD::CTLZ, VT, N0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitCTTZ(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitCTTZ(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // fold (cttz c1) -> c2 if (isa<ConstantSDNode>(N0)) return DAG.getNode(ISD::CTTZ, VT, N0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitCTPOP(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitCTPOP(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // fold (ctpop c1) -> c2 if (isa<ConstantSDNode>(N0)) return DAG.getNode(ISD::CTPOP, VT, N0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSELECT(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); - SDOperand N2 = N->getOperand(2); +SDValue DAGCombiner::visitSELECT(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2); @@ -2582,7 +2582,7 @@ SDOperand DAGCombiner::visitSELECT(SDNode *N) { // fold select C, 0, 1 -> ~C if (VT.isInteger() && VT0.isInteger() && N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) { - SDOperand XORNode = DAG.getNode(ISD::XOR, VT0, N0, DAG.getConstant(1, VT0)); + SDValue XORNode = DAG.getNode(ISD::XOR, VT0, N0, DAG.getConstant(1, VT0)); if (VT == VT0) return XORNode; AddToWorkList(XORNode.Val); @@ -2592,13 +2592,13 @@ SDOperand DAGCombiner::visitSELECT(SDNode *N) { } // fold select C, 0, X -> ~C & X if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) { - SDOperand XORNode = DAG.getNode(ISD::XOR, VT, N0, DAG.getConstant(1, VT)); + SDValue XORNode = DAG.getNode(ISD::XOR, VT, N0, DAG.getConstant(1, VT)); AddToWorkList(XORNode.Val); return DAG.getNode(ISD::AND, VT, XORNode, N2); } // fold select C, X, 1 -> ~C | X if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) { - SDOperand XORNode = DAG.getNode(ISD::XOR, VT, N0, DAG.getConstant(1, VT)); + SDValue XORNode = DAG.getNode(ISD::XOR, VT, N0, DAG.getConstant(1, VT)); AddToWorkList(XORNode.Val); return DAG.getNode(ISD::OR, VT, XORNode, N1); } @@ -2615,7 +2615,7 @@ SDOperand DAGCombiner::visitSELECT(SDNode *N) { // If we can fold this based on the true/false value, do so. if (SimplifySelectOps(N, N1, N2)) - return SDOperand(N, 0); // Don't revisit N. + return SDValue(N, 0); // Don't revisit N. 
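The i1 select folds above ("select C, 0, 1 -> ~C" and friends) are truth-table identities; a short exhaustive check in C++ (illustrative sketch only):

#include <cassert>

int main() {
  for (int c = 0; c <= 1; ++c)
    for (int x = 0; x <= 1; ++x) {
      assert((c ? 0 : 1) == (c ^ 1));          // select C, 0, 1 -> ~C
      assert((c ? 0 : x) == ((c ^ 1) & x));    // select C, 0, X -> ~C & X
      assert((c ? x : 1) == ((c ^ 1) | x));    // select C, X, 1 -> ~C | X
    }
}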
// fold selects based on a setcc into other things, such as min/max/abs if (N0.getOpcode() == ISD::SETCC) { @@ -2629,15 +2629,15 @@ SDOperand DAGCombiner::visitSELECT(SDNode *N) { else return SimplifySelect(N0, N1, N2); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSELECT_CC(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); - SDOperand N2 = N->getOperand(2); - SDOperand N3 = N->getOperand(3); - SDOperand N4 = N->getOperand(4); +SDValue DAGCombiner::visitSELECT_CC(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); + SDValue N3 = N->getOperand(3); + SDValue N4 = N->getOperand(4); ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get(); // fold select_cc lhs, rhs, x, x, cc -> x @@ -2645,7 +2645,7 @@ SDOperand DAGCombiner::visitSELECT_CC(SDNode *N) { return N2; // Determine if the condition we're dealing with is constant - SDOperand SCC = SimplifySetCC(TLI.getSetCCResultType(N0), N0, N1, CC, false); + SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0), N0, N1, CC, false); if (SCC.Val) AddToWorkList(SCC.Val); if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.Val)) { @@ -2663,13 +2663,13 @@ SDOperand DAGCombiner::visitSELECT_CC(SDNode *N) { // If we can fold this based on the true/false value, do so. if (SimplifySelectOps(N, N2, N3)) - return SDOperand(N, 0); // Don't revisit N. + return SDValue(N, 0); // Don't revisit N. // fold select_cc into other things, such as min/max/abs return SimplifySelectCC(N0, N1, N2, N3, CC); } -SDOperand DAGCombiner::visitSETCC(SDNode *N) { +SDValue DAGCombiner::visitSETCC(SDNode *N) { return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1), cast<CondCodeSDNode>(N->getOperand(2))->get()); } @@ -2678,7 +2678,7 @@ SDOperand DAGCombiner::visitSETCC(SDNode *N) { // "fold ({s|z}ext (load x)) -> ({s|z}ext (truncate ({s|z}extload x)))" // transformation. Returns true if extension are possible and the above // mentioned transformation is profitable. -static bool ExtendUsesToFormExtLoad(SDNode *N, SDOperand N0, +static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0, unsigned ExtOpc, SmallVector<SDNode*, 4> &ExtendNodes, TargetLowering &TLI) { @@ -2697,7 +2697,7 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDOperand N0, return false; bool Add = false; for (unsigned i = 0; i != 2; ++i) { - SDOperand UseOp = User->getOperand(i); + SDValue UseOp = User->getOperand(i); if (UseOp == N0) continue; if (!isa<ConstantSDNode>(UseOp)) @@ -2708,7 +2708,7 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDOperand N0, ExtendNodes.push_back(User); } else { for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { - SDOperand UseOp = User->getOperand(i); + SDValue UseOp = User->getOperand(i); if (UseOp == N0) { // If truncate from extended type to original load type is free // on this target, then it's ok to extend a CopyToReg. 
@@ -2727,7 +2727,7 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDOperand N0, UI != UE; ++UI) { SDNode *User = *UI; for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { - SDOperand UseOp = User->getOperand(i); + SDValue UseOp = User->getOperand(i); if (UseOp.Val == N && UseOp.ResNo == 0) { BothLiveOut = true; break; @@ -2742,8 +2742,8 @@ static bool ExtendUsesToFormExtLoad(SDNode *N, SDOperand N0, return true; } -SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // fold (sext c1) -> c1 @@ -2758,7 +2758,7 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { if (N0.getOpcode() == ISD::TRUNCATE) { // fold (sext (truncate (load x))) -> (sext (smaller load x)) // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n))) - SDOperand NarrowLoad = ReduceLoadWidth(N0.Val); + SDValue NarrowLoad = ReduceLoadWidth(N0.Val); if (NarrowLoad.Val) { if (NarrowLoad.Val != N0.Val) CombineTo(N0.Val, NarrowLoad); @@ -2767,7 +2767,7 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { // See if the value being truncated is already sign extended. If so, just // eliminate the trunc/sext pair. - SDOperand Op = N0.getOperand(0); + SDValue Op = N0.getOperand(0); unsigned OpBits = Op.getValueType().getSizeInBits(); unsigned MidBits = N0.getValueType().getSizeInBits(); unsigned DestBits = VT.getSizeInBits(); @@ -2812,21 +2812,21 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI); if (DoXform) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), N0.getValueType(), LN0->isVolatile(), LN0->getAlignment()); CombineTo(N, ExtLoad); - SDOperand Trunc = DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad); + SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad); CombineTo(N0.Val, Trunc, ExtLoad.getValue(1)); // Extend SetCC uses if necessary. for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) { SDNode *SetCC = SetCCs[i]; - SmallVector<SDOperand, 4> Ops; + SmallVector<SDValue, 4> Ops; for (unsigned j = 0; j != 2; ++j) { - SDOperand SOp = SetCC->getOperand(j); + SDValue SOp = SetCC->getOperand(j); if (SOp == Trunc) Ops.push_back(ExtLoad); else @@ -2836,7 +2836,7 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { CombineTo(SetCC, DAG.getNode(ISD::SETCC, SetCC->getValueType(0), &Ops[0], Ops.size())); } - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } } @@ -2848,7 +2848,7 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { MVT EVT = LN0->getMemoryVT(); if ((!AfterLegalize && !LN0->isVolatile()) || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) { - SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, LN0->isVolatile(), @@ -2856,13 +2856,13 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { CombineTo(N, ExtLoad); CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad), ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! 
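The trunc/sext elimination in visitSIGN_EXTEND hinges on the value already having enough sign bits: if a 32-bit value is already sign-extended from 8 bits, truncating to i8 and sign-extending back is the identity. A hedged C++ illustration, assuming the usual int8_t/int32_t widths:

#include <cassert>
#include <cstdint>

int main() {
  int32_t a = -5;   // bits 31..7 are all copies of bit 7
  int32_t b = 100;  // likewise, a small non-negative value
  assert(int32_t(int8_t(a)) == a);  // sext(trunc a) == a
  assert(int32_t(int8_t(b)) == b);
}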
+ return SDValue(N, 0); // Return N so it doesn't get rechecked! } } // sext(setcc x,y,cc) -> select_cc x, y, -1, 0, cc if (N0.getOpcode() == ISD::SETCC) { - SDOperand SCC = + SDValue SCC = SimplifySelectCC(N0.getOperand(0), N0.getOperand(1), DAG.getConstant(~0ULL, VT), DAG.getConstant(0, VT), cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); @@ -2874,11 +2874,11 @@ SDOperand DAGCombiner::visitSIGN_EXTEND(SDNode *N) { DAG.SignBitIsZero(N0)) return DAG.getNode(ISD::ZERO_EXTEND, VT, N0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // fold (zext c1) -> c1 @@ -2892,7 +2892,7 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { // fold (zext (truncate (load x))) -> (zext (smaller load x)) // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n))) if (N0.getOpcode() == ISD::TRUNCATE) { - SDOperand NarrowLoad = ReduceLoadWidth(N0.Val); + SDValue NarrowLoad = ReduceLoadWidth(N0.Val); if (NarrowLoad.Val) { if (NarrowLoad.Val != N0.Val) CombineTo(N0.Val, NarrowLoad); @@ -2903,7 +2903,7 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { // fold (zext (truncate x)) -> (and x, mask) if (N0.getOpcode() == ISD::TRUNCATE && (!AfterLegalize || TLI.isOperationLegal(ISD::AND, VT))) { - SDOperand Op = N0.getOperand(0); + SDValue Op = N0.getOperand(0); if (Op.getValueType().bitsLT(VT)) { Op = DAG.getNode(ISD::ANY_EXTEND, VT, Op); } else if (Op.getValueType().bitsGT(VT)) { @@ -2916,7 +2916,7 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::TRUNCATE && N0.getOperand(1).getOpcode() == ISD::Constant) { - SDOperand X = N0.getOperand(0).getOperand(0); + SDValue X = N0.getOperand(0).getOperand(0); if (X.getValueType().bitsLT(VT)) { X = DAG.getNode(ISD::ANY_EXTEND, VT, X); } else if (X.getValueType().bitsGT(VT)) { @@ -2937,21 +2937,21 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI); if (DoXform) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), N0.getValueType(), LN0->isVolatile(), LN0->getAlignment()); CombineTo(N, ExtLoad); - SDOperand Trunc = DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad); + SDValue Trunc = DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad); CombineTo(N0.Val, Trunc, ExtLoad.getValue(1)); // Extend SetCC uses if necessary. for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) { SDNode *SetCC = SetCCs[i]; - SmallVector<SDOperand, 4> Ops; + SmallVector<SDValue, 4> Ops; for (unsigned j = 0; j != 2; ++j) { - SDOperand SOp = SetCC->getOperand(j); + SDValue SOp = SetCC->getOperand(j); if (SOp == Trunc) Ops.push_back(ExtLoad); else @@ -2961,7 +2961,7 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { CombineTo(SetCC, DAG.getNode(ISD::SETCC, SetCC->getValueType(0), &Ops[0], Ops.size())); } - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! 
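The "(zext (truncate x)) -> (and x, mask)" fold in visitZERO_EXTEND is the usual masking identity; sketched in C++ for 8- and 16-bit truncations (illustration only):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xCAFEBABEu;
  assert(uint32_t(uint8_t(x))  == (x & 0xFFu));    // zext(trunc to i8)
  assert(uint32_t(uint16_t(x)) == (x & 0xFFFFu));  // zext(trunc to i16)
}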
} } @@ -2973,7 +2973,7 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { MVT EVT = LN0->getMemoryVT(); if ((!AfterLegalize && !LN0->isVolatile()) || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT)) { - SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, LN0->isVolatile(), @@ -2981,24 +2981,24 @@ SDOperand DAGCombiner::visitZERO_EXTEND(SDNode *N) { CombineTo(N, ExtLoad); CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad), ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } } // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc if (N0.getOpcode() == ISD::SETCC) { - SDOperand SCC = + SDValue SCC = SimplifySelectCC(N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, VT), DAG.getConstant(0, VT), cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); if (SCC.Val) return SCC; } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // fold (aext c1) -> c1 @@ -3015,7 +3015,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { // fold (aext (truncate (load x))) -> (aext (smaller load x)) // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n))) if (N0.getOpcode() == ISD::TRUNCATE) { - SDOperand NarrowLoad = ReduceLoadWidth(N0.Val); + SDValue NarrowLoad = ReduceLoadWidth(N0.Val); if (NarrowLoad.Val) { if (NarrowLoad.Val != N0.Val) CombineTo(N0.Val, NarrowLoad); @@ -3025,7 +3025,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { // fold (aext (truncate x)) if (N0.getOpcode() == ISD::TRUNCATE) { - SDOperand TruncOp = N0.getOperand(0); + SDValue TruncOp = N0.getOperand(0); if (TruncOp.getValueType() == VT) return TruncOp; // x iff x size == zext size. if (TruncOp.getValueType().bitsGT(VT)) @@ -3037,7 +3037,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::TRUNCATE && N0.getOperand(1).getOpcode() == ISD::Constant) { - SDOperand X = N0.getOperand(0).getOperand(0); + SDValue X = N0.getOperand(0).getOperand(0); if (X.getValueType().bitsLT(VT)) { X = DAG.getNode(ISD::ANY_EXTEND, VT, X); } else if (X.getValueType().bitsGT(VT)) { @@ -3053,7 +3053,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) || TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - SDOperand ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), N0.getValueType(), @@ -3062,7 +3062,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { CombineTo(N, ExtLoad); CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad), ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! 
} // fold (aext (zextload x)) -> (aext (truncate (zextload x))) @@ -3073,7 +3073,7 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { N0.hasOneUse()) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); MVT EVT = LN0->getMemoryVT(); - SDOperand ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT, + SDValue ExtLoad = DAG.getExtLoad(LN0->getExtensionType(), VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, @@ -3082,12 +3082,12 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { CombineTo(N, ExtLoad); CombineTo(N0.Val, DAG.getNode(ISD::TRUNCATE, N0.getValueType(), ExtLoad), ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc if (N0.getOpcode() == ISD::SETCC) { - SDOperand SCC = + SDValue SCC = SimplifySelectCC(N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, VT), DAG.getConstant(0, VT), cast<CondCodeSDNode>(N0.getOperand(2))->get(), true); @@ -3095,13 +3095,13 @@ SDOperand DAGCombiner::visitANY_EXTEND(SDNode *N) { return SCC; } - return SDOperand(); + return SDValue(); } /// GetDemandedBits - See if the specified operand can be simplified with the /// knowledge that only the bits specified by Mask are used. If so, return the -/// simpler operand, otherwise return a null SDOperand. -SDOperand DAGCombiner::GetDemandedBits(SDOperand V, const APInt &Mask) { +/// simpler operand, otherwise return a null SDValue. +SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) { switch (V.getOpcode()) { default: break; case ISD::OR: @@ -3120,14 +3120,14 @@ SDOperand DAGCombiner::GetDemandedBits(SDOperand V, const APInt &Mask) { // See if we can recursively simplify the LHS. unsigned Amt = RHSC->getValue(); APInt NewMask = Mask << Amt; - SDOperand SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask); + SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask); if (SimplifyLHS.Val) { return DAG.getNode(ISD::SRL, V.getValueType(), SimplifyLHS, V.getOperand(1)); } } } - return SDOperand(); + return SDValue(); } /// ReduceLoadWidth - If the result of a wider load is shifted to right of N @@ -3135,10 +3135,10 @@ SDOperand DAGCombiner::GetDemandedBits(SDOperand V, const APInt &Mask) { /// of number of bits of the narrower type, transform it to a narrower load /// from address + N / num of bits of new type. If the result is to be /// extended, also fold the extension to form a extending load. 
-SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { +SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) { unsigned Opc = N->getOpcode(); ISD::LoadExtType ExtType = ISD::NON_EXTLOAD; - SDOperand N0 = N->getOperand(0); + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); MVT EVT = N->getValueType(0); @@ -3148,7 +3148,7 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { ExtType = ISD::SEXTLOAD; EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); if (AfterLegalize && !TLI.isLoadXLegal(ISD::SEXTLOAD, EVT)) - return SDOperand(); + return SDValue(); } unsigned EVTBits = EVT.getSizeInBits(); @@ -3161,7 +3161,7 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { if ((ShAmt & (EVTBits-1)) == 0) { N0 = N0.getOperand(0); if (N0.getValueType().getSizeInBits() <= EVTBits) - return SDOperand(); + return SDValue(); CombineSRL = true; } } @@ -3185,10 +3185,10 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { } uint64_t PtrOff = ShAmt / 8; unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff); - SDOperand NewPtr = DAG.getNode(ISD::ADD, PtrType, LN0->getBasePtr(), + SDValue NewPtr = DAG.getNode(ISD::ADD, PtrType, LN0->getBasePtr(), DAG.getConstant(PtrOff, PtrType)); AddToWorkList(NewPtr.Val); - SDOperand Load = (ExtType == ISD::NON_EXTLOAD) + SDValue Load = (ExtType == ISD::NON_EXTLOAD) ? DAG.getLoad(VT, LN0->getChain(), NewPtr, LN0->getSrcValue(), LN0->getSrcValueOffset(), LN0->isVolatile(), NewAlign) @@ -3209,16 +3209,16 @@ SDOperand DAGCombiner::ReduceLoadWidth(SDNode *N) { else return DAG.getNode(Opc, VT, Load); } - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); MVT VT = N->getValueType(0); MVT EVT = cast<VTSDNode>(N1)->getVT(); unsigned VTBits = VT.getSizeInBits(); @@ -3244,12 +3244,12 @@ SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { // fold operands of sext_in_reg based on knowledge that the top bits are not // demanded. - if (SimplifyDemandedBits(SDOperand(N, 0))) - return SDOperand(N, 0); + if (SimplifyDemandedBits(SDValue(N, 0))) + return SDValue(N, 0); // fold (sext_in_reg (load x)) -> (smaller sextload x) // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits)) - SDOperand NarrowLoad = ReduceLoadWidth(N); + SDValue NarrowLoad = ReduceLoadWidth(N); if (NarrowLoad.Val) return NarrowLoad; @@ -3274,14 +3274,14 @@ SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, LN0->isVolatile(), LN0->getAlignment()); CombineTo(N, ExtLoad); CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! 
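For reference, sign_extend_inreg(x, i8) takes the low 8 bits of x and sign-extends them to the full width; ReduceLoadWidth and the sext_in_reg folds above are profitable exactly because that result can often come straight from a narrower extending load. A small, fully defined C++ model of the operation's semantics, not of the LLVM lowering:

#include <cassert>
#include <cstdint>

static int32_t sext_inreg8(uint32_t x) {
  int32_t low = int32_t(x & 0x7Fu);        // the 7 value bits
  return (x & 0x80u) ? low - 0x80 : low;   // apply the sign bit
}

int main() {
  assert(sext_inreg8(0x000000FBu) == -5);
  assert(sext_inreg8(0x1234567Bu) == 0x7B);
}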
} // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use if (ISD::isZEXTLoad(N0.Val) && ISD::isUNINDEXEDLoad(N0.Val) && @@ -3290,20 +3290,20 @@ SDOperand DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) || TLI.isLoadXLegal(ISD::SEXTLOAD, EVT))) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - SDOperand ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), EVT, LN0->isVolatile(), LN0->getAlignment()); CombineTo(N, ExtLoad); CombineTo(N0.Val, ExtLoad, ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // noop truncate @@ -3333,7 +3333,7 @@ SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) { // See if we can simplify the input to this truncate through knowledge that // only the low bits are being used. For example "trunc (or (shl x, 8), y)" // -> trunc y - SDOperand Shorter = + SDValue Shorter = GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(), VT.getSizeInBits())); if (Shorter.Val) @@ -3345,7 +3345,7 @@ SDOperand DAGCombiner::visitTRUNCATE(SDNode *N) { } static SDNode *getBuildPairElt(SDNode *N, unsigned i) { - SDOperand Elt = N->getOperand(i); + SDValue Elt = N->getOperand(i); if (Elt.getOpcode() != ISD::MERGE_VALUES) return Elt.Val; return Elt.getOperand(Elt.ResNo).Val; @@ -3353,12 +3353,12 @@ static SDNode *getBuildPairElt(SDNode *N, unsigned i) { /// CombineConsecutiveLoads - build_pair (load, load) -> load /// if load locations are consecutive. -SDOperand DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) { +SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) { assert(N->getOpcode() == ISD::BUILD_PAIR); SDNode *LD1 = getBuildPairElt(N, 0); if (!ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse()) - return SDOperand(); + return SDValue(); MVT LD1VT = LD1->getValueType(0); SDNode *LD2 = getBuildPairElt(N, 1); const MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); @@ -3379,11 +3379,11 @@ SDOperand DAGCombiner::CombineConsecutiveLoads(SDNode *N, MVT VT) { LD->getSrcValue(), LD->getSrcValueOffset(), false, Align); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); // If the input is a BUILD_VECTOR with all constant elements, fold this now. @@ -3412,7 +3412,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { // If the input is a constant, let getNode() fold it. 
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) { - SDOperand Res = DAG.getNode(ISD::BIT_CONVERT, VT, N0); + SDValue Res = DAG.getNode(ISD::BIT_CONVERT, VT, N0); if (Res.Val != N) return Res; } @@ -3430,7 +3430,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { getABITypeAlignment(VT.getTypeForMVT()); unsigned OrigAlign = LN0->getAlignment(); if (Align <= OrigAlign) { - SDOperand Load = DAG.getLoad(VT, LN0->getChain(), LN0->getBasePtr(), + SDValue Load = DAG.getLoad(VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), LN0->isVolatile(), OrigAlign); AddToWorkList(N); @@ -3445,7 +3445,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { // This often reduces constant pool loads. if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) && N0.Val->hasOneUse() && VT.isInteger() && !VT.isVector()) { - SDOperand NewConv = DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0)); + SDValue NewConv = DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0)); AddToWorkList(NewConv.Val); APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); @@ -3462,7 +3462,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { isa<ConstantFPSDNode>(N0.getOperand(0)) && VT.isInteger() && !VT.isVector()) { unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits(); - SDOperand X = DAG.getNode(ISD::BIT_CONVERT, + SDValue X = DAG.getNode(ISD::BIT_CONVERT, MVT::getIntegerVT(OrigXWidth), N0.getOperand(1)); AddToWorkList(X.Val); @@ -3486,7 +3486,7 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { X = DAG.getNode(ISD::AND, VT, X, DAG.getConstant(SignBit, VT)); AddToWorkList(X.Val); - SDOperand Cst = DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0)); + SDValue Cst = DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0)); Cst = DAG.getNode(ISD::AND, VT, Cst, DAG.getConstant(~SignBit, VT)); AddToWorkList(Cst.Val); @@ -3495,15 +3495,15 @@ SDOperand DAGCombiner::visitBIT_CONVERT(SDNode *N) { // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive. if (N0.getOpcode() == ISD::BUILD_PAIR) { - SDOperand CombineLD = CombineConsecutiveLoads(N0.Val, VT); + SDValue CombineLD = CombineConsecutiveLoads(N0.Val, VT); if (CombineLD.Val) return CombineLD; } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitBUILD_PAIR(SDNode *N) { +SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) { MVT VT = N->getValueType(0); return CombineConsecutiveLoads(N, VT); } @@ -3511,12 +3511,12 @@ SDOperand DAGCombiner::visitBUILD_PAIR(SDNode *N) { /// ConstantFoldBIT_CONVERTofBUILD_VECTOR - We know that BV is a build_vector /// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the /// destination element value type. -SDOperand DAGCombiner:: +SDValue DAGCombiner:: ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) { MVT SrcEltVT = BV->getOperand(0).getValueType(); // If this is already the right type, we're done. - if (SrcEltVT == DstEltVT) return SDOperand(BV, 0); + if (SrcEltVT == DstEltVT) return SDValue(BV, 0); unsigned SrcBitSize = SrcEltVT.getSizeInBits(); unsigned DstBitSize = DstEltVT.getSizeInBits(); @@ -3524,7 +3524,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) { // If this is a conversion of N elements of one type to N elements of another // type, convert each element. This handles FP<->INT cases. 
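The fneg/fabs-through-bitconvert folds above work because, in IEEE-754, the sign is the top bit of the bit pattern: once the value is viewed as an integer, fneg is an xor with the sign bit and fabs is an and with its complement. A C++ sketch assuming a 32-bit IEEE-754 float (illustration only):

#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t bits(float f)        { uint32_t u; std::memcpy(&u, &f, 4); return u; }
static float    fromBits(uint32_t u) { float f;    std::memcpy(&f, &u, 4); return f; }

int main() {
  float x = -3.5f;
  assert(fromBits(bits(x) & 0x7FFFFFFFu) ==  3.5f);  // fabs: clear the sign bit
  assert(fromBits(bits(x) ^ 0x80000000u) ==  3.5f);  // fneg: flip the sign bit
  assert(fromBits(bits(2.25f) ^ 0x80000000u) == -2.25f);
}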
if (SrcBitSize == DstBitSize) { - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, DstEltVT, BV->getOperand(i))); AddToWorkList(Ops.back().Val); @@ -3563,7 +3563,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) { if (SrcBitSize < DstBitSize) { unsigned NumInputsPerOutput = DstBitSize/SrcBitSize; - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = BV->getNumOperands(); i != e; i += NumInputsPerOutput) { bool isLE = TLI.isLittleEndian(); @@ -3572,7 +3572,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) { for (unsigned j = 0; j != NumInputsPerOutput; ++j) { // Shift the previously computed bits over. NewBits <<= SrcBitSize; - SDOperand Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j)); + SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j)); if (Op.getOpcode() == ISD::UNDEF) continue; EltIsUndef = false; @@ -3595,7 +3595,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) { bool isS2V = ISD::isScalarToVector(BV); unsigned NumOutputsPerInput = SrcBitSize/DstBitSize; MVT VT = MVT::getVectorVT(DstEltVT, NumOutputsPerInput*BV->getNumOperands()); - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { if (BV->getOperand(i).getOpcode() == ISD::UNDEF) { for (unsigned j = 0; j != NumOutputsPerInput; ++j) @@ -3621,16 +3621,16 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, MVT DstEltVT) { -SDOperand DAGCombiner::visitFADD(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitFADD(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); MVT VT = N->getValueType(0); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3655,19 +3655,19 @@ SDOperand DAGCombiner::visitFADD(SDNode *N) { return DAG.getNode(ISD::FADD, VT, N0.getOperand(0), DAG.getNode(ISD::FADD, VT, N0.getOperand(1), N1)); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFSUB(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitFSUB(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); MVT VT = N->getValueType(0); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3685,19 +3685,19 @@ SDOperand DAGCombiner::visitFSUB(SDNode *N) { return DAG.getNode(ISD::FADD, VT, N0, GetNegatedExpression(N1, DAG, AfterLegalize)); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFMUL(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitFMUL(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); MVT VT = N->getValueType(0); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = 
SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3732,19 +3732,19 @@ SDOperand DAGCombiner::visitFMUL(SDNode *N) { return DAG.getNode(ISD::FMUL, VT, N0.getOperand(0), DAG.getNode(ISD::FMUL, VT, N0.getOperand(1), N1)); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFDIV(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitFDIV(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); MVT VT = N->getValueType(0); // fold vector ops if (VT.isVector()) { - SDOperand FoldedVOp = SimplifyVBinOp(N); + SDValue FoldedVOp = SimplifyVBinOp(N); if (FoldedVOp.Val) return FoldedVOp; } @@ -3765,12 +3765,12 @@ SDOperand DAGCombiner::visitFDIV(SDNode *N) { } } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFREM(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitFREM(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); MVT VT = N->getValueType(0); @@ -3779,12 +3779,12 @@ SDOperand DAGCombiner::visitFREM(SDNode *N) { if (N0CFP && N1CFP && VT != MVT::ppcf128) return DAG.getNode(ISD::FREM, VT, N0, N1); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFCOPYSIGN(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); MVT VT = N->getValueType(0); @@ -3822,13 +3822,13 @@ SDOperand DAGCombiner::visitFCOPYSIGN(SDNode *N) { if (N1.getOpcode() == ISD::FP_EXTEND || N1.getOpcode() == ISD::FP_ROUND) return DAG.getNode(ISD::FCOPYSIGN, VT, N0, N1.getOperand(0)); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSINT_TO_FP(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) { + SDValue N0 = N->getOperand(0); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); MVT VT = N->getValueType(0); MVT OpVT = N0.getValueType(); @@ -3847,11 +3847,11 @@ SDOperand DAGCombiner::visitSINT_TO_FP(SDNode *N) { } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitUINT_TO_FP(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) { + SDValue N0 = N->getOperand(0); ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); MVT VT = N->getValueType(0); MVT OpVT = N0.getValueType(); @@ -3869,34 +3869,34 @@ SDOperand DAGCombiner::visitUINT_TO_FP(SDNode *N) { return DAG.getNode(ISD::SINT_TO_FP, VT, N0); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFP_TO_SINT(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) { + SDValue N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); MVT VT = N->getValueType(0); // fold (fp_to_sint c1fp) -> c1 if (N0CFP) return DAG.getNode(ISD::FP_TO_SINT, VT, N0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFP_TO_UINT(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue 
DAGCombiner::visitFP_TO_UINT(SDNode *N) { + SDValue N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); MVT VT = N->getValueType(0); // fold (fp_to_uint c1fp) -> c1 if (N0CFP && VT != MVT::ppcf128) return DAG.getNode(ISD::FP_TO_UINT, VT, N0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFP_ROUND(SDNode *N) { - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); +SDValue DAGCombiner::visitFP_ROUND(SDNode *N) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); MVT VT = N->getValueType(0); @@ -3919,37 +3919,37 @@ SDOperand DAGCombiner::visitFP_ROUND(SDNode *N) { // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y) if (N0.getOpcode() == ISD::FCOPYSIGN && N0.Val->hasOneUse()) { - SDOperand Tmp = DAG.getNode(ISD::FP_ROUND, VT, N0.getOperand(0), N1); + SDValue Tmp = DAG.getNode(ISD::FP_ROUND, VT, N0.getOperand(0), N1); AddToWorkList(Tmp.Val); return DAG.getNode(ISD::FCOPYSIGN, VT, Tmp, N0.getOperand(1)); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { + SDValue N0 = N->getOperand(0); MVT VT = N->getValueType(0); MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); // fold (fp_round_inreg c1fp) -> c1fp if (N0CFP) { - SDOperand Round = DAG.getConstantFP(N0CFP->getValueAPF(), EVT); + SDValue Round = DAG.getConstantFP(N0CFP->getValueAPF(), EVT); return DAG.getNode(ISD::FP_EXTEND, VT, Round); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFP_EXTEND(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) { + SDValue N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); MVT VT = N->getValueType(0); // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. if (N->hasOneUse() && - N->use_begin().getUse().getSDOperand().getOpcode() == ISD::FP_ROUND) - return SDOperand(); + N->use_begin().getUse().getSDValue().getOpcode() == ISD::FP_ROUND) + return SDValue(); // fold (fp_extend c1fp) -> c1fp if (N0CFP && VT != MVT::ppcf128) @@ -3958,7 +3958,7 @@ SDOperand DAGCombiner::visitFP_EXTEND(SDNode *N) { // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the // value of X. if (N0.getOpcode() == ISD::FP_ROUND && N0.Val->getConstantOperandVal(1) == 1){ - SDOperand In = N0.getOperand(0); + SDValue In = N0.getOperand(0); if (In.getValueType() == VT) return In; if (VT.bitsLT(In.getValueType())) return DAG.getNode(ISD::FP_ROUND, VT, In, N0.getOperand(1)); @@ -3970,7 +3970,7 @@ SDOperand DAGCombiner::visitFP_EXTEND(SDNode *N) { ((!AfterLegalize && !cast<LoadSDNode>(N0)->isVolatile()) || TLI.isLoadXLegal(ISD::EXTLOAD, N0.getValueType()))) { LoadSDNode *LN0 = cast<LoadSDNode>(N0); - SDOperand ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(), + SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, VT, LN0->getChain(), LN0->getBasePtr(), LN0->getSrcValue(), LN0->getSrcValueOffset(), N0.getValueType(), @@ -3980,14 +3980,14 @@ SDOperand DAGCombiner::visitFP_EXTEND(SDNode *N) { CombineTo(N0.Val, DAG.getNode(ISD::FP_ROUND, N0.getValueType(), ExtLoad, DAG.getIntPtrConstant(1)), ExtLoad.getValue(1)); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! 
} - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFNEG(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitFNEG(SDNode *N) { + SDValue N0 = N->getOperand(0); if (isNegatibleForFree(N0, AfterLegalize)) return GetNegatedExpression(N0, DAG, AfterLegalize); @@ -3997,7 +3997,7 @@ SDOperand DAGCombiner::visitFNEG(SDNode *N) { if (N0.getOpcode() == ISD::BIT_CONVERT && N0.Val->hasOneUse() && N0.getOperand(0).getValueType().isInteger() && !N0.getOperand(0).getValueType().isVector()) { - SDOperand Int = N0.getOperand(0); + SDValue Int = N0.getOperand(0); MVT IntVT = Int.getValueType(); if (IntVT.isInteger() && !IntVT.isVector()) { Int = DAG.getNode(ISD::XOR, IntVT, Int, @@ -4007,11 +4007,11 @@ SDOperand DAGCombiner::visitFNEG(SDNode *N) { } } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitFABS(SDNode *N) { - SDOperand N0 = N->getOperand(0); +SDValue DAGCombiner::visitFABS(SDNode *N) { + SDValue N0 = N->getOperand(0); ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); MVT VT = N->getValueType(0); @@ -4031,7 +4031,7 @@ SDOperand DAGCombiner::visitFABS(SDNode *N) { if (N0.getOpcode() == ISD::BIT_CONVERT && N0.Val->hasOneUse() && N0.getOperand(0).getValueType().isInteger() && !N0.getOperand(0).getValueType().isVector()) { - SDOperand Int = N0.getOperand(0); + SDValue Int = N0.getOperand(0); MVT IntVT = Int.getValueType(); if (IntVT.isInteger() && !IntVT.isVector()) { Int = DAG.getNode(ISD::AND, IntVT, Int, @@ -4041,13 +4041,13 @@ SDOperand DAGCombiner::visitFABS(SDNode *N) { } } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitBRCOND(SDNode *N) { - SDOperand Chain = N->getOperand(0); - SDOperand N1 = N->getOperand(1); - SDOperand N2 = N->getOperand(2); +SDValue DAGCombiner::visitBRCOND(SDNode *N) { + SDValue Chain = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); // never taken branch, fold to chain @@ -4063,17 +4063,17 @@ SDOperand DAGCombiner::visitBRCOND(SDNode *N) { return DAG.getNode(ISD::BR_CC, MVT::Other, Chain, N1.getOperand(2), N1.getOperand(0), N1.getOperand(1), N2); } - return SDOperand(); + return SDValue(); } // Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB. // -SDOperand DAGCombiner::visitBR_CC(SDNode *N) { +SDValue DAGCombiner::visitBR_CC(SDNode *N) { CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1)); - SDOperand CondLHS = N->getOperand(2), CondRHS = N->getOperand(3); + SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3); // Use SimplifySetCC to simplify SETCC's. - SDOperand Simp = SimplifySetCC(MVT::i1, CondLHS, CondRHS, CC->get(), false); + SDValue Simp = SimplifySetCC(MVT::i1, CondLHS, CondRHS, CC->get(), false); if (Simp.Val) AddToWorkList(Simp.Val); ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(Simp.Val); @@ -4091,7 +4091,7 @@ SDOperand DAGCombiner::visitBR_CC(SDNode *N) { return DAG.getNode(ISD::BR_CC, MVT::Other, N->getOperand(0), Simp.getOperand(2), Simp.getOperand(0), Simp.getOperand(1), N->getOperand(4)); - return SDOperand(); + return SDValue(); } @@ -4106,7 +4106,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { return false; bool isLoad = true; - SDOperand Ptr; + SDValue Ptr; MVT VT; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { if (LD->isIndexed()) @@ -4135,8 +4135,8 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { return false; // Ask the target to do addressing mode selection. 
- SDOperand BasePtr; - SDOperand Offset; + SDValue BasePtr; + SDValue Offset; ISD::MemIndexedMode AM = ISD::UNINDEXED; if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG)) return false; @@ -4160,7 +4160,7 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { // Check #2. if (!isLoad) { - SDOperand Val = cast<StoreSDNode>(N)->getValue(); + SDValue Val = cast<StoreSDNode>(N)->getValue(); if (Val == BasePtr || BasePtr.Val->isPredecessorOf(Val.Val)) return false; } @@ -4184,11 +4184,11 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { if (!RealUse) return false; - SDOperand Result; + SDValue Result; if (isLoad) - Result = DAG.getIndexedLoad(SDOperand(N,0), BasePtr, Offset, AM); + Result = DAG.getIndexedLoad(SDValue(N,0), BasePtr, Offset, AM); else - Result = DAG.getIndexedStore(SDOperand(N,0), BasePtr, Offset, AM); + Result = DAG.getIndexedStore(SDValue(N,0), BasePtr, Offset, AM); ++PreIndexedNodes; ++NodesCombined; DOUT << "\nReplacing.4 "; DEBUG(N->dump(&DAG)); @@ -4196,12 +4196,12 @@ bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { DOUT << '\n'; WorkListRemover DeadNodes(*this); if (isLoad) { - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(0), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0), &DeadNodes); - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 1), Result.getValue(2), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2), &DeadNodes); } else { - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(1), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1), &DeadNodes); } @@ -4227,7 +4227,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { return false; bool isLoad = true; - SDOperand Ptr; + SDValue Ptr; MVT VT; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { if (LD->isIndexed()) @@ -4259,8 +4259,8 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)) continue; - SDOperand BasePtr; - SDOperand Offset; + SDValue BasePtr; + SDValue Offset; ISD::MemIndexedMode AM = ISD::UNINDEXED; if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) { if (Ptr == Offset) @@ -4311,9 +4311,9 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { // Check for #2 if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) { - SDOperand Result = isLoad - ? DAG.getIndexedLoad(SDOperand(N,0), BasePtr, Offset, AM) - : DAG.getIndexedStore(SDOperand(N,0), BasePtr, Offset, AM); + SDValue Result = isLoad + ? DAG.getIndexedLoad(SDValue(N,0), BasePtr, Offset, AM) + : DAG.getIndexedStore(SDValue(N,0), BasePtr, Offset, AM); ++PostIndexedNodes; ++NodesCombined; DOUT << "\nReplacing.5 "; DEBUG(N->dump(&DAG)); @@ -4321,12 +4321,12 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { DOUT << '\n'; WorkListRemover DeadNodes(*this); if (isLoad) { - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(0), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0), &DeadNodes); - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 1), Result.getValue(2), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2), &DeadNodes); } else { - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result.getValue(1), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1), &DeadNodes); } @@ -4334,7 +4334,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { DAG.DeleteNode(N); // Replace the uses of Use with uses of the updated base value. 
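Pre/post-indexed combining folds the address arithmetic into the memory operation itself on targets whose load/store forms can write the updated base back. A loose source-level analogy in C++ (the real transform rewrites DAG nodes, not source; sketch only):

#include <cassert>

int sum_separate(const int *p, int n) {
  int s = 0;
  for (int i = 0; i < n; ++i) { s += *p; p += 1; }  // load, then a separate pointer add
  return s;
}

int sum_postinc(const int *p, int n) {
  int s = 0;
  for (int i = 0; i < n; ++i) s += *p++;            // load and base update in one step
  return s;
}

int main() {
  int a[4] = {1, 2, 3, 4};
  assert(sum_separate(a, 4) == sum_postinc(a, 4));
}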
- DAG.ReplaceAllUsesOfValueWith(SDOperand(Op, 0), + DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0), Result.getValue(isLoad ? 1 : 0), &DeadNodes); removeFromWorkList(Op); @@ -4348,7 +4348,7 @@ bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { /// InferAlignment - If we can infer some alignment information from this /// pointer, return it. -static unsigned InferAlignment(SDOperand Ptr, SelectionDAG &DAG) { +static unsigned InferAlignment(SDValue Ptr, SelectionDAG &DAG) { // If this is a direct reference to a stack slot, use information about the // stack slot's alignment. int FrameIdx = 1 << 31; @@ -4389,10 +4389,10 @@ static unsigned InferAlignment(SDOperand Ptr, SelectionDAG &DAG) { return 0; } -SDOperand DAGCombiner::visitLOAD(SDNode *N) { +SDValue DAGCombiner::visitLOAD(SDNode *N) { LoadSDNode *LD = cast<LoadSDNode>(N); - SDOperand Chain = LD->getChain(); - SDOperand Ptr = LD->getBasePtr(); + SDValue Chain = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); // Try to infer better alignment information than the load already has. if (LD->isUnindexed()) { @@ -4423,30 +4423,30 @@ SDOperand DAGCombiner::visitLOAD(SDNode *N) { DOUT << "\nWith chain: "; DEBUG(Chain.Val->dump(&DAG)); DOUT << "\n"; WorkListRemover DeadNodes(*this); - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 1), Chain, &DeadNodes); + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain, &DeadNodes); if (N->use_empty()) { removeFromWorkList(N); DAG.DeleteNode(N); } - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } } else { // Indexed loads. assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?"); if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) { - SDOperand Undef = DAG.getNode(ISD::UNDEF, N->getValueType(0)); + SDValue Undef = DAG.getNode(ISD::UNDEF, N->getValueType(0)); DOUT << "\nReplacing.6 "; DEBUG(N->dump(&DAG)); DOUT << "\nWith: "; DEBUG(Undef.Val->dump(&DAG)); DOUT << " and 2 other values\n"; WorkListRemover DeadNodes(*this); - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Undef, &DeadNodes); - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 1), + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef, &DeadNodes); + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), DAG.getNode(ISD::UNDEF, N->getValueType(1)), &DeadNodes); - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 2), Chain, &DeadNodes); + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain, &DeadNodes); removeFromWorkList(N); DAG.DeleteNode(N); - return SDOperand(N, 0); // Return N so it doesn't get rechecked! + return SDValue(N, 0); // Return N so it doesn't get rechecked! } } } @@ -4467,11 +4467,11 @@ SDOperand DAGCombiner::visitLOAD(SDNode *N) { if (CombinerAA) { // Walk up chain skipping non-aliasing memory nodes. - SDOperand BetterChain = FindBetterChain(N, Chain); + SDValue BetterChain = FindBetterChain(N, Chain); // If there is a better chain. if (Chain != BetterChain) { - SDOperand ReplLoad; + SDValue ReplLoad; // Replace the chain to void dependency. if (LD->getExtensionType() == ISD::NON_EXTLOAD) { @@ -4489,7 +4489,7 @@ SDOperand DAGCombiner::visitLOAD(SDNode *N) { } // Create token factor to keep old chain connected. - SDOperand Token = DAG.getNode(ISD::TokenFactor, MVT::Other, + SDValue Token = DAG.getNode(ISD::TokenFactor, MVT::Other, Chain, ReplLoad.getValue(1)); // Replace uses with load result and token factor. Don't add users @@ -4500,17 +4500,17 @@ SDOperand DAGCombiner::visitLOAD(SDNode *N) { // Try transforming N to an indexed load. 
if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N)) - return SDOperand(N, 0); + return SDValue(N, 0); - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitSTORE(SDNode *N) { +SDValue DAGCombiner::visitSTORE(SDNode *N) { StoreSDNode *ST = cast<StoreSDNode>(N); - SDOperand Chain = ST->getChain(); - SDOperand Value = ST->getValue(); - SDOperand Ptr = ST->getBasePtr(); + SDValue Chain = ST->getChain(); + SDValue Value = ST->getValue(); + SDValue Ptr = ST->getBasePtr(); // Try to infer better alignment information than the store already has. if (ST->isUnindexed()) { @@ -4544,7 +4544,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { // processor operation but an i64 (which is not legal) requires two. So the // transform should not be done in this case. if (Value.getOpcode() != ISD::TargetConstantFP) { - SDOperand Tmp; + SDValue Tmp; switch (CFP->getValueType(0).getSimpleVT()) { default: assert(0 && "Unknown FP type"); case MVT::f80: // We don't do this for these yet. @@ -4575,22 +4575,22 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { // argument passing. Since this is so common, custom legalize the // 64-bit integer store into two 32-bit stores. uint64_t Val = CFP->getValueAPF().convertToAPInt().getZExtValue(); - SDOperand Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32); - SDOperand Hi = DAG.getConstant(Val >> 32, MVT::i32); + SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32); + SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32); if (TLI.isBigEndian()) std::swap(Lo, Hi); int SVOffset = ST->getSrcValueOffset(); unsigned Alignment = ST->getAlignment(); bool isVolatile = ST->isVolatile(); - SDOperand St0 = DAG.getStore(Chain, Lo, Ptr, ST->getSrcValue(), + SDValue St0 = DAG.getStore(Chain, Lo, Ptr, ST->getSrcValue(), ST->getSrcValueOffset(), isVolatile, ST->getAlignment()); Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr, DAG.getConstant(4, Ptr.getValueType())); SVOffset += 4; Alignment = MinAlign(Alignment, 4U); - SDOperand St1 = DAG.getStore(Chain, Hi, Ptr, ST->getSrcValue(), + SDValue St1 = DAG.getStore(Chain, Hi, Ptr, ST->getSrcValue(), SVOffset, isVolatile, Alignment); return DAG.getNode(ISD::TokenFactor, MVT::Other, St0, St1); } @@ -4601,12 +4601,12 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { if (CombinerAA) { // Walk up chain skipping non-aliasing memory nodes. - SDOperand BetterChain = FindBetterChain(N, Chain); + SDValue BetterChain = FindBetterChain(N, Chain); // If there is a better chain. if (Chain != BetterChain) { // Replace the chain to avoid dependency. - SDOperand ReplStore; + SDValue ReplStore; if (ST->isTruncatingStore()) { ReplStore = DAG.getTruncStore(BetterChain, Value, Ptr, ST->getSrcValue(),ST->getSrcValueOffset(), @@ -4619,7 +4619,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { } // Create token to keep both nodes around. - SDOperand Token = + SDValue Token = DAG.getNode(ISD::TokenFactor, MVT::Other, Chain, ReplStore); // Don't add users to work list. @@ -4629,7 +4629,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { // Try transforming N to an indexed store. if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N)) - return SDOperand(N, 0); + return SDValue(N, 0); // FIXME: is there such a thing as a truncating indexed store? if (ST->isTruncatingStore() && ST->isUnindexed() && @@ -4637,7 +4637,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { // See if we can simplify the input to this truncstore with knowledge that // only the low bits are being used. 
For example: // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8" - SDOperand Shorter = + SDValue Shorter = GetDemandedBits(Value, APInt::getLowBitsSet(Value.getValueSizeInBits(), ST->getMemoryVT().getSizeInBits())); @@ -4653,7 +4653,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { APInt::getLowBitsSet( Value.getValueSizeInBits(), ST->getMemoryVT().getSizeInBits()))) - return SDOperand(N, 0); + return SDValue(N, 0); } // If this is a load followed by a store to the same location, then the store @@ -4663,7 +4663,7 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { ST->isUnindexed() && !ST->isVolatile() && // There can't be any side effects between the load and store, such as // a call or store. - Chain.reachesChainWithoutSideEffects(SDOperand(Ld, 1))) { + Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) { // The store is dead, remove it. return Chain; } @@ -4680,39 +4680,39 @@ SDOperand DAGCombiner::visitSTORE(SDNode *N) { ST->isVolatile(), ST->getAlignment()); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) { - SDOperand InVec = N->getOperand(0); - SDOperand InVal = N->getOperand(1); - SDOperand EltNo = N->getOperand(2); +SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) { + SDValue InVec = N->getOperand(0); + SDValue InVal = N->getOperand(1); + SDValue EltNo = N->getOperand(2); // If the invec is a BUILD_VECTOR and if EltNo is a constant, build a new // vector with the inserted element. if (InVec.getOpcode() == ISD::BUILD_VECTOR && isa<ConstantSDNode>(EltNo)) { unsigned Elt = cast<ConstantSDNode>(EltNo)->getValue(); - SmallVector<SDOperand, 8> Ops(InVec.Val->op_begin(), InVec.Val->op_end()); + SmallVector<SDValue, 8> Ops(InVec.Val->op_begin(), InVec.Val->op_end()); if (Elt < Ops.size()) Ops[Elt] = InVal; return DAG.getNode(ISD::BUILD_VECTOR, InVec.getValueType(), &Ops[0], Ops.size()); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { +SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size) // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size) // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr) // Perform only after legalization to ensure build_vector / vector_shuffle // optimizations have already been done. 
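Aside: the demanded-bits simplification above works because a truncating i8 store keeps only the low byte, and the (shl x, 8) term cannot contribute to it. A small standalone check of that identity (plain C++, not DAG code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = 0xDEADBEEF, y = 0x12345678;
  uint8_t full = uint8_t((x << 8) | y);  // byte an i8 truncstore would keep
  uint8_t demanded = uint8_t(y);         // after dropping the shl contribution
  assert(full == demanded);
  return 0;
}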
- if (!AfterLegalize) return SDOperand(); + if (!AfterLegalize) return SDValue(); - SDOperand InVec = N->getOperand(0); - SDOperand EltNo = N->getOperand(1); + SDValue InVec = N->getOperand(0); + SDValue EltNo = N->getOperand(1); if (isa<ConstantSDNode>(EltNo)) { unsigned Elt = cast<ConstantSDNode>(EltNo)->getValue(); @@ -4723,7 +4723,7 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { if (InVec.getOpcode() == ISD::BIT_CONVERT) { MVT BCVT = InVec.getOperand(0).getValueType(); if (!BCVT.isVector() || EVT.bitsGT(BCVT.getVectorElementType())) - return SDOperand(); + return SDValue(); InVec = InVec.getOperand(0); EVT = BCVT.getVectorElementType(); NewLoad = true; @@ -4752,7 +4752,7 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { } } if (!LN0 || !LN0->hasOneUse() || LN0->isVolatile()) - return SDOperand(); + return SDValue(); unsigned Align = LN0->getAlignment(); if (NewLoad) { @@ -4761,11 +4761,11 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { unsigned NewAlign = TLI.getTargetMachine().getTargetData()-> getABITypeAlignment(LVT.getTypeForMVT()); if (NewAlign > Align || !TLI.isOperationLegal(ISD::LOAD, LVT)) - return SDOperand(); + return SDValue(); Align = NewAlign; } - SDOperand NewPtr = LN0->getBasePtr(); + SDValue NewPtr = LN0->getBasePtr(); if (Elt) { unsigned PtrOff = LVT.getSizeInBits() * Elt / 8; MVT PtrType = NewPtr.getValueType(); @@ -4778,11 +4778,11 @@ SDOperand DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { LN0->getSrcValue(), LN0->getSrcValueOffset(), LN0->isVolatile(), Align); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { +SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { unsigned NumInScalars = N->getNumOperands(); MVT VT = N->getValueType(0); unsigned NumElts = VT.getVectorNumElements(); @@ -4791,7 +4791,7 @@ SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT // operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from // at most two distinct vectors, turn this into a shuffle node. - SDOperand VecIn1, VecIn2; + SDValue VecIn1, VecIn2; for (unsigned i = 0; i != NumInScalars; ++i) { // Ignore undef inputs. if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; @@ -4800,15 +4800,15 @@ SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { // constant index, bail out. if (N->getOperand(i).getOpcode() != ISD::EXTRACT_VECTOR_ELT || !isa<ConstantSDNode>(N->getOperand(i).getOperand(1))) { - VecIn1 = VecIn2 = SDOperand(0, 0); + VecIn1 = VecIn2 = SDValue(0, 0); break; } // If the input vector type disagrees with the result of the build_vector, // we can't make a shuffle. - SDOperand ExtractedFromVec = N->getOperand(i).getOperand(0); + SDValue ExtractedFromVec = N->getOperand(i).getOperand(0); if (ExtractedFromVec.getValueType() != VT) { - VecIn1 = VecIn2 = SDOperand(0, 0); + VecIn1 = VecIn2 = SDValue(0, 0); break; } @@ -4822,21 +4822,21 @@ SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { VecIn2 = ExtractedFromVec; } else { // Too many inputs. - VecIn1 = VecIn2 = SDOperand(0, 0); + VecIn1 = VecIn2 = SDValue(0, 0); break; } } // If everything is good, we can make a shuffle operation. 
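Aside: the vextract-of-load folds above reduce to ordinary pointer arithmetic, since the extracted scalar lives at the vector's address plus the element index times the element size. A standalone sketch of that address math (hypothetical helper, not LLVM's API):

#include <cstdint>

uintptr_t ElementAddress(uintptr_t VecAddr, unsigned EltIndex,
                         unsigned EltSizeInBits) {
  unsigned PtrOff = EltSizeInBits * EltIndex / 8;  // byte offset of the element
  return VecAddr + PtrOff;                         // address for the scalar load
}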
if (VecIn1.Val) { - SmallVector<SDOperand, 8> BuildVecIndices; + SmallVector<SDValue, 8> BuildVecIndices; for (unsigned i = 0; i != NumInScalars; ++i) { if (N->getOperand(i).getOpcode() == ISD::UNDEF) { BuildVecIndices.push_back(DAG.getNode(ISD::UNDEF, TLI.getPointerTy())); continue; } - SDOperand Extract = N->getOperand(i); + SDValue Extract = N->getOperand(i); // If extracting from the first vector, just use the index directly. if (Extract.getOperand(0) == VecIn1) { @@ -4853,13 +4853,13 @@ SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { MVT BuildVecVT = MVT::getVectorVT(TLI.getPointerTy(), NumElts); // Return the new VECTOR_SHUFFLE node. - SDOperand Ops[5]; + SDValue Ops[5]; Ops[0] = VecIn1; if (VecIn2.Val) { Ops[1] = VecIn2; } else { // Use an undef build_vector as input for the second operand. - std::vector<SDOperand> UnOps(NumInScalars, + std::vector<SDValue> UnOps(NumInScalars, DAG.getNode(ISD::UNDEF, EltType)); Ops[1] = DAG.getNode(ISD::BUILD_VECTOR, VT, @@ -4871,10 +4871,10 @@ SDOperand DAGCombiner::visitBUILD_VECTOR(SDNode *N) { return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Ops, 3); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { +SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of // EXTRACT_SUBVECTOR operations. If so, and if the EXTRACT_SUBVECTOR vector // inputs come from at most two distinct vectors, turn this into a shuffle @@ -4885,11 +4885,11 @@ SDOperand DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { return N->getOperand(0); } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { - SDOperand ShufMask = N->getOperand(2); +SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { + SDValue ShufMask = N->getOperand(2); unsigned NumElts = ShufMask.getNumOperands(); // If the shuffle mask is an identity operation on the LHS, return the LHS. @@ -4937,8 +4937,8 @@ SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { } } - SDOperand N0 = N->getOperand(0); - SDOperand N1 = N->getOperand(1); + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); // Normalize unary shuffle so the RHS is undef. if (isUnary && VecNum == 1) std::swap(N0, N1); @@ -4952,7 +4952,7 @@ SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { // not the number of vector elements, look through it. Be careful not to // look though conversions that change things like v4f32 to v2f64. if (V->getOpcode() == ISD::BIT_CONVERT) { - SDOperand ConvInput = V->getOperand(0); + SDValue ConvInput = V->getOperand(0); if (ConvInput.getValueType().isVector() && ConvInput.getValueType().getVectorNumElements() == NumElts) V = ConvInput.Val; @@ -4961,7 +4961,7 @@ SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { if (V->getOpcode() == ISD::BUILD_VECTOR) { unsigned NumElems = V->getNumOperands(); if (NumElems > BaseIdx) { - SDOperand Base; + SDValue Base; bool AllSame = true; for (unsigned i = 0; i != NumElems; ++i) { if (V->getOperand(i).getOpcode() != ISD::UNDEF) { @@ -4990,7 +4990,7 @@ SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { if (isUnary || N0 == N1) { // Check the SHUFFLE mask, mapping any inputs from the 2nd operand into the // first operand. 
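Aside: the loop above records which input vector each scalar was extracted from; the VECTOR_SHUFFLE mask then uses the extract index directly for the first vector and, for the second vector, an index offset past the first vector's length (the usual two-input shuffle numbering; that branch falls outside the hunk shown, so treat it as an assumption). A standalone sketch of the mask construction:

#include <vector>

struct Extract { bool FromFirstVec; unsigned Idx; };

std::vector<unsigned> BuildShuffleMask(const std::vector<Extract> &Elts) {
  std::vector<unsigned> Mask;
  unsigned NumElts = Elts.size();
  for (const Extract &E : Elts)
    Mask.push_back(E.FromFirstVec ? E.Idx : E.Idx + NumElts);
  return Mask;
}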
- SmallVector<SDOperand, 8> MappedOps; + SmallVector<SDValue, 8> MappedOps; for (unsigned i = 0; i != NumElts; ++i) { if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF || cast<ConstantSDNode>(ShufMask.getOperand(i))->getValue() < NumElts) { @@ -5011,85 +5011,85 @@ SDOperand DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { ShufMask); } - return SDOperand(); + return SDValue(); } /// XformToShuffleWithZero - Returns a vector_shuffle if it able to transform /// an AND to a vector_shuffle with the destination vector and a zero vector. /// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==> /// vector_shuffle V, Zero, <0, 4, 2, 4> -SDOperand DAGCombiner::XformToShuffleWithZero(SDNode *N) { - SDOperand LHS = N->getOperand(0); - SDOperand RHS = N->getOperand(1); +SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) { + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); if (N->getOpcode() == ISD::AND) { if (RHS.getOpcode() == ISD::BIT_CONVERT) RHS = RHS.getOperand(0); if (RHS.getOpcode() == ISD::BUILD_VECTOR) { - std::vector<SDOperand> IdxOps; + std::vector<SDValue> IdxOps; unsigned NumOps = RHS.getNumOperands(); unsigned NumElts = NumOps; MVT EVT = RHS.getValueType().getVectorElementType(); for (unsigned i = 0; i != NumElts; ++i) { - SDOperand Elt = RHS.getOperand(i); + SDValue Elt = RHS.getOperand(i); if (!isa<ConstantSDNode>(Elt)) - return SDOperand(); + return SDValue(); else if (cast<ConstantSDNode>(Elt)->isAllOnesValue()) IdxOps.push_back(DAG.getConstant(i, EVT)); else if (cast<ConstantSDNode>(Elt)->isNullValue()) IdxOps.push_back(DAG.getConstant(NumElts, EVT)); else - return SDOperand(); + return SDValue(); } // Let's see if the target supports this vector_shuffle. if (!TLI.isVectorClearMaskLegal(IdxOps, EVT, DAG)) - return SDOperand(); + return SDValue(); // Return the new VECTOR_SHUFFLE node. MVT VT = MVT::getVectorVT(EVT, NumElts); - std::vector<SDOperand> Ops; + std::vector<SDValue> Ops; LHS = DAG.getNode(ISD::BIT_CONVERT, VT, LHS); Ops.push_back(LHS); AddToWorkList(LHS.Val); - std::vector<SDOperand> ZeroOps(NumElts, DAG.getConstant(0, EVT)); + std::vector<SDValue> ZeroOps(NumElts, DAG.getConstant(0, EVT)); Ops.push_back(DAG.getNode(ISD::BUILD_VECTOR, VT, &ZeroOps[0], ZeroOps.size())); Ops.push_back(DAG.getNode(ISD::BUILD_VECTOR, VT, &IdxOps[0], IdxOps.size())); - SDOperand Result = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, + SDValue Result = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, &Ops[0], Ops.size()); if (VT != N->getValueType(0)) Result = DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Result); return Result; } } - return SDOperand(); + return SDValue(); } /// SimplifyVBinOp - Visit a binary vector operation, like ADD. -SDOperand DAGCombiner::SimplifyVBinOp(SDNode *N) { +SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) { // After legalize, the target may be depending on adds and other // binary ops to provide legal ways to construct constants or other // things. Simplifying them may result in a loss of legality. - if (AfterLegalize) return SDOperand(); + if (AfterLegalize) return SDValue(); MVT VT = N->getValueType(0); assert(VT.isVector() && "SimplifyVBinOp only works on vectors!"); MVT EltType = VT.getVectorElementType(); - SDOperand LHS = N->getOperand(0); - SDOperand RHS = N->getOperand(1); - SDOperand Shuffle = XformToShuffleWithZero(N); + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + SDValue Shuffle = XformToShuffleWithZero(N); if (Shuffle.Val) return Shuffle; // If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold // this operation. 
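Aside: the IdxOps construction above maps an all-ones lane to its own index (keep that element of V) and a zero lane to NumElts (take an element of the appended zero vector), giving <0, 4, 2, 4> for the AND V, <0xffffffff, 0, 0xffffffff, 0> example. A standalone sketch with a hypothetical helper:

#include <cstdint>
#include <optional>
#include <vector>

std::optional<std::vector<unsigned>>
ClearMaskToShuffle(const std::vector<uint32_t> &AndMask) {
  unsigned NumElts = AndMask.size();
  std::vector<unsigned> Idx;
  for (unsigned i = 0; i != NumElts; ++i) {
    if (AndMask[i] == 0xFFFFFFFFu)      // keep lane i of V
      Idx.push_back(i);
    else if (AndMask[i] == 0)           // take a lane of the zero vector
      Idx.push_back(NumElts);
    else
      return std::nullopt;              // not a pure keep/clear mask
  }
  return Idx;
}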
if (LHS.getOpcode() == ISD::BUILD_VECTOR && RHS.getOpcode() == ISD::BUILD_VECTOR) { - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = LHS.getNumOperands(); i != e; ++i) { - SDOperand LHSOp = LHS.getOperand(i); - SDOperand RHSOp = RHS.getOperand(i); + SDValue LHSOp = LHS.getOperand(i); + SDValue RHSOp = RHS.getOperand(i); // If these two elements can't be folded, bail out. if ((LHSOp.getOpcode() != ISD::UNDEF && LHSOp.getOpcode() != ISD::Constant && @@ -5121,13 +5121,13 @@ SDOperand DAGCombiner::SimplifyVBinOp(SDNode *N) { } } - return SDOperand(); + return SDValue(); } -SDOperand DAGCombiner::SimplifySelect(SDOperand N0, SDOperand N1, SDOperand N2){ +SDValue DAGCombiner::SimplifySelect(SDValue N0, SDValue N1, SDValue N2){ assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!"); - SDOperand SCC = SimplifySelectCC(N0.getOperand(0), N0.getOperand(1), N1, N2, + SDValue SCC = SimplifySelectCC(N0.getOperand(0), N0.getOperand(1), N1, N2, cast<CondCodeSDNode>(N0.getOperand(2))->get()); // If we got a simplified select_cc node back from SimplifySelectCC, then // break it down into a new SETCC node, and a new SELECT node, and then return @@ -5136,7 +5136,7 @@ SDOperand DAGCombiner::SimplifySelect(SDOperand N0, SDOperand N1, SDOperand N2){ // Check to see if we got a select_cc back (to turn into setcc/select). // Otherwise, just return whatever node we got back, like fabs. if (SCC.getOpcode() == ISD::SELECT_CC) { - SDOperand SETCC = DAG.getNode(ISD::SETCC, N0.getValueType(), + SDValue SETCC = DAG.getNode(ISD::SETCC, N0.getValueType(), SCC.getOperand(0), SCC.getOperand(1), SCC.getOperand(4)); AddToWorkList(SETCC.Val); @@ -5145,7 +5145,7 @@ SDOperand DAGCombiner::SimplifySelect(SDOperand N0, SDOperand N1, SDOperand N2){ } return SCC; } - return SDOperand(); + return SDValue(); } /// SimplifySelectOps - Given a SELECT or a SELECT_CC node, where LHS and RHS @@ -5155,8 +5155,8 @@ SDOperand DAGCombiner::SimplifySelect(SDOperand N0, SDOperand N1, SDOperand N2){ /// node) back to the top-level of the DAG combiner loop to avoid it being /// looked at. /// -bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDOperand LHS, - SDOperand RHS) { +bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS, + SDValue RHS) { // If this is a select from two identical things, try to pull the operation // through the select. @@ -5179,7 +5179,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDOperand LHS, // FIXME: this conflates two src values, discarding one. This is not // the right thing to do, but nothing uses srcvalues now. When they do, // turn SrcValue into a list of locations. - SDOperand Addr; + SDValue Addr; if (TheSelect->getOpcode() == ISD::SELECT) { // Check that the condition doesn't reach either load. If so, folding // this will induce a cycle into the DAG. 
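Aside: when both operands are BUILD_VECTORs of constants, the loop above folds the operation one lane at a time and rebuilds a constant BUILD_VECTOR from the results. A standalone sketch for an integer ADD (hypothetical helper, not LLVM's API):

#include <cassert>
#include <cstddef>
#include <vector>

std::vector<int> FoldVectorAdd(const std::vector<int> &LHS,
                               const std::vector<int> &RHS) {
  assert(LHS.size() == RHS.size() && "operands must have the same length");
  std::vector<int> Ops;
  for (std::size_t i = 0; i != LHS.size(); ++i)
    Ops.push_back(LHS[i] + RHS[i]);  // fold one lane at a time
  return Ops;                        // becomes the new BUILD_VECTOR
}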
@@ -5205,7 +5205,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDOperand LHS, } if (Addr.Val) { - SDOperand Load; + SDValue Load; if (LLD->getExtensionType() == ISD::NON_EXTLOAD) Load = DAG.getLoad(TheSelect->getValueType(0), LLD->getChain(), Addr,LLD->getSrcValue(), @@ -5237,9 +5237,9 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDOperand LHS, return false; } -SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, - SDOperand N2, SDOperand N3, - ISD::CondCode CC, bool NotExtCompare) { +SDValue DAGCombiner::SimplifySelectCC(SDValue N0, SDValue N1, + SDValue N2, SDValue N3, + ISD::CondCode CC, bool NotExtCompare) { MVT VT = N2.getValueType(); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); @@ -5247,7 +5247,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.Val); // Determine if the condition we're dealing with is constant - SDOperand SCC = SimplifySetCC(TLI.getSetCCResultType(N0), N0, N1, CC, false); + SDValue SCC = SimplifySetCC(TLI.getSetCCResultType(N0), N0, N1, CC, false); if (SCC.Val) AddToWorkList(SCC.Val); ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.Val); @@ -5291,8 +5291,8 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue()-1)) == 0)) { unsigned ShCtV = N2C->getAPIntValue().logBase2(); ShCtV = XType.getSizeInBits()-ShCtV-1; - SDOperand ShCt = DAG.getConstant(ShCtV, TLI.getShiftAmountTy()); - SDOperand Shift = DAG.getNode(ISD::SRL, XType, N0, ShCt); + SDValue ShCt = DAG.getConstant(ShCtV, TLI.getShiftAmountTy()); + SDValue Shift = DAG.getNode(ISD::SRL, XType, N0, ShCt); AddToWorkList(Shift.Val); if (XType.bitsGT(AType)) { Shift = DAG.getNode(ISD::TRUNCATE, AType, Shift); @@ -5300,7 +5300,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, } return DAG.getNode(ISD::AND, AType, Shift, N2); } - SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0, + SDValue Shift = DAG.getNode(ISD::SRA, XType, N0, DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); AddToWorkList(Shift.Val); @@ -5319,12 +5319,12 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, // If the caller doesn't want us to simplify this into a zext of a compare, // don't do it. if (NotExtCompare && N2C->getAPIntValue() == 1) - return SDOperand(); + return SDValue(); // Get a SetCC of the condition // FIXME: Should probably make sure that setcc is legal if we ever have a // target where it isn't. 
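Aside: the power-of-two case above turns (X < 0) ? A : 0 into a logical shift that moves the sign bit into A's bit position followed by an AND; the general case uses an arithmetic shift by size-1 to smear the sign bit across the word. A standalone check of both forms for 32-bit integers (arithmetic right shift on signed values assumed, as on mainstream compilers):

#include <cassert>
#include <cstdint>

uint32_t SelectNegPow2(int32_t X, uint32_t A, unsigned Log2A) {
  return (uint32_t(X) >> (31 - Log2A)) & A;   // srl by size-log2-1, then and
}

uint32_t SelectNegGeneral(int32_t X, uint32_t A) {
  return uint32_t(X >> 31) & A;               // sra spreads the sign bit
}

int main() {
  assert(SelectNegPow2(-5, 8, 3) == 8 && SelectNegPow2(7, 8, 3) == 0);
  assert(SelectNegGeneral(-5, 123) == 123 && SelectNegGeneral(7, 123) == 0);
  return 0;
}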
- SDOperand Temp, SCC; + SDValue Temp, SCC; // cast from setcc result type to select result type if (AfterLegalize) { SCC = DAG.getSetCC(TLI.getSetCCResultType(N0), N0, N1, CC); @@ -5354,7 +5354,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, MVT XType = N0.getValueType(); if (!AfterLegalize || TLI.isOperationLegal(ISD::SETCC, TLI.getSetCCResultType(N0))) { - SDOperand Res = DAG.getSetCC(TLI.getSetCCResultType(N0), N0, N1, CC); + SDValue Res = DAG.getSetCC(TLI.getSetCCResultType(N0), N0, N1, CC); if (Res.getValueType() != VT) Res = DAG.getNode(ISD::ZERO_EXTEND, VT, Res); return Res; @@ -5364,16 +5364,16 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, if (N1C && N1C->isNullValue() && CC == ISD::SETEQ && (!AfterLegalize || TLI.isOperationLegal(ISD::CTLZ, XType))) { - SDOperand Ctlz = DAG.getNode(ISD::CTLZ, XType, N0); + SDValue Ctlz = DAG.getNode(ISD::CTLZ, XType, N0); return DAG.getNode(ISD::SRL, XType, Ctlz, DAG.getConstant(Log2_32(XType.getSizeInBits()), TLI.getShiftAmountTy())); } // setgt X, 0 -> srl (and (-X, ~X), size(X)-1) if (N1C && N1C->isNullValue() && CC == ISD::SETGT) { - SDOperand NegN0 = DAG.getNode(ISD::SUB, XType, DAG.getConstant(0, XType), + SDValue NegN0 = DAG.getNode(ISD::SUB, XType, DAG.getConstant(0, XType), N0); - SDOperand NotN0 = DAG.getNode(ISD::XOR, XType, N0, + SDValue NotN0 = DAG.getNode(ISD::XOR, XType, N0, DAG.getConstant(~0ULL, XType)); return DAG.getNode(ISD::SRL, XType, DAG.getNode(ISD::AND, XType, NegN0, NotN0), @@ -5382,7 +5382,7 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, } // setgt X, -1 -> xor (srl (X, size(X)-1), 1) if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) { - SDOperand Sign = DAG.getNode(ISD::SRL, XType, N0, + SDValue Sign = DAG.getNode(ISD::SRL, XType, N0, DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); return DAG.getNode(ISD::XOR, XType, Sign, DAG.getConstant(1, XType)); @@ -5395,10 +5395,10 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1) && N2.getOperand(0) == N1 && N0.getValueType().isInteger()) { MVT XType = N0.getValueType(); - SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0, + SDValue Shift = DAG.getNode(ISD::SRA, XType, N0, DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); - SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift); + SDValue Add = DAG.getNode(ISD::ADD, XType, N0, Shift); AddToWorkList(Shift.Val); AddToWorkList(Add.Val); return DAG.getNode(ISD::XOR, XType, Add, Shift); @@ -5410,10 +5410,10 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, if (ConstantSDNode *SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0))) { MVT XType = N0.getValueType(); if (SubC->isNullValue() && XType.isInteger()) { - SDOperand Shift = DAG.getNode(ISD::SRA, XType, N0, + SDValue Shift = DAG.getNode(ISD::SRA, XType, N0, DAG.getConstant(XType.getSizeInBits()-1, TLI.getShiftAmountTy())); - SDOperand Add = DAG.getNode(ISD::ADD, XType, N0, Shift); + SDValue Add = DAG.getNode(ISD::ADD, XType, N0, Shift); AddToWorkList(Shift.Val); AddToWorkList(Add.Val); return DAG.getNode(ISD::XOR, XType, Add, Shift); @@ -5421,13 +5421,13 @@ SDOperand DAGCombiner::SimplifySelectCC(SDOperand N0, SDOperand N1, } } - return SDOperand(); + return SDValue(); } /// SimplifySetCC - This is a stub for TargetLowering::SimplifySetCC. 
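Aside: several of the branchless identities used above can be checked directly on 32-bit integers; the standalone sketch below exercises the setgt X, -1, the setgt X, 0, and the abs-via-sra/add/xor forms (arithmetic right shift on signed values assumed):

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  for (int32_t x : {-7, -1, 0, 1, 42}) {
    int32_t sign = x >> 31;                                     // sra X, 31
    // setgt X, -1  ->  xor (srl (X, 31), 1)
    assert(((uint32_t(x) >> 31) ^ 1u) == uint32_t(x > -1));
    // setgt X, 0   ->  srl (and (-X, ~X), 31)
    assert(((uint32_t(-x) & ~uint32_t(x)) >> 31) == uint32_t(x > 0));
    // abs(X)       ->  xor (add (X, sra (X, 31)), sra (X, 31))
    assert(((x + sign) ^ sign) == std::abs(x));
  }
  return 0;
}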
-SDOperand DAGCombiner::SimplifySetCC(MVT VT, SDOperand N0, - SDOperand N1, ISD::CondCode Cond, - bool foldBooleans) { +SDValue DAGCombiner::SimplifySetCC(MVT VT, SDValue N0, + SDValue N1, ISD::CondCode Cond, + bool foldBooleans) { TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, !AfterLegalize, false, this); return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo); @@ -5437,9 +5437,9 @@ SDOperand DAGCombiner::SimplifySetCC(MVT VT, SDOperand N0, /// return a DAG expression to select that will generate the same value by /// multiplying by a magic number. See: /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> -SDOperand DAGCombiner::BuildSDIV(SDNode *N) { +SDValue DAGCombiner::BuildSDIV(SDNode *N) { std::vector<SDNode*> Built; - SDOperand S = TLI.BuildSDIV(N, DAG, &Built); + SDValue S = TLI.BuildSDIV(N, DAG, &Built); for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end(); ii != ee; ++ii) @@ -5451,9 +5451,9 @@ SDOperand DAGCombiner::BuildSDIV(SDNode *N) { /// return a DAG expression to select that will generate the same value by /// multiplying by a magic number. See: /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> -SDOperand DAGCombiner::BuildUDIV(SDNode *N) { +SDValue DAGCombiner::BuildUDIV(SDNode *N) { std::vector<SDNode*> Built; - SDOperand S = TLI.BuildUDIV(N, DAG, &Built); + SDValue S = TLI.BuildUDIV(N, DAG, &Built); for (std::vector<SDNode*>::iterator ii = Built.begin(), ee = Built.end(); ii != ee; ++ii) @@ -5463,7 +5463,7 @@ SDOperand DAGCombiner::BuildUDIV(SDNode *N) { /// FindBaseOffset - Return true if base is known not to alias with anything /// but itself. Provides base object and offset as results. -static bool FindBaseOffset(SDOperand Ptr, SDOperand &Base, int64_t &Offset) { +static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset) { // Assume it is a primitive operation. Base = Ptr; Offset = 0; @@ -5483,16 +5483,16 @@ static bool FindBaseOffset(SDOperand Ptr, SDOperand &Base, int64_t &Offset) { /// isAlias - Return true if there is any possibility that the two addresses /// overlap. -bool DAGCombiner::isAlias(SDOperand Ptr1, int64_t Size1, +bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1, const Value *SrcValue1, int SrcValueOffset1, - SDOperand Ptr2, int64_t Size2, + SDValue Ptr2, int64_t Size2, const Value *SrcValue2, int SrcValueOffset2) { // If they are the same then they must be aliases. if (Ptr1 == Ptr2) return true; // Gather base node and offset information. - SDOperand Base1, Base2; + SDValue Base1, Base2; int64_t Offset1, Offset2; bool KnownBase1 = FindBaseOffset(Ptr1, Base1, Offset1); bool KnownBase2 = FindBaseOffset(Ptr2, Base2, Offset2); @@ -5524,7 +5524,7 @@ bool DAGCombiner::isAlias(SDOperand Ptr1, int64_t Size1, /// FindAliasInfo - Extracts the relevant alias information from the memory /// node. Returns true if the operand was a load. bool DAGCombiner::FindAliasInfo(SDNode *N, - SDOperand &Ptr, int64_t &Size, + SDValue &Ptr, int64_t &Size, const Value *&SrcValue, int &SrcValueOffset) { if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { Ptr = LD->getBasePtr(); @@ -5546,13 +5546,13 @@ bool DAGCombiner::FindAliasInfo(SDNode *N, /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes, /// looking for aliasing nodes and adding them to the Aliases vector. -void DAGCombiner::GatherAllAliases(SDNode *N, SDOperand OriginalChain, - SmallVector<SDOperand, 8> &Aliases) { - SmallVector<SDOperand, 8> Chains; // List of chains to visit. 
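Aside: once both pointers resolve to the same base, two accesses can only alias when their byte ranges overlap; the elided remainder of isAlias performs essentially this disjoint-interval test (the exact form lies outside the hunk shown). A standalone sketch with a hypothetical helper:

#include <cstdint>

bool MayOverlapSameBase(int64_t Offset1, int64_t Size1,
                        int64_t Offset2, int64_t Size2) {
  // Disjoint iff one range ends before the other begins.
  return !(Offset1 + Size1 <= Offset2 || Offset2 + Size2 <= Offset1);
}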
+void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain, + SmallVector<SDValue, 8> &Aliases) { + SmallVector<SDValue, 8> Chains; // List of chains to visit. std::set<SDNode *> Visited; // Visited node set. // Get alias information for node. - SDOperand Ptr; + SDValue Ptr; int64_t Size; const Value *SrcValue; int SrcValueOffset; @@ -5565,7 +5565,7 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDOperand OriginalChain, // aliases list. If not, then continue up the chain looking for the next // candidate. while (!Chains.empty()) { - SDOperand Chain = Chains.back(); + SDValue Chain = Chains.back(); Chains.pop_back(); // Don't bother if we've been before. @@ -5580,7 +5580,7 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDOperand OriginalChain, case ISD::LOAD: case ISD::STORE: { // Get alias information for Chain. - SDOperand OpPtr; + SDValue OpPtr; int64_t OpSize; const Value *OpSrcValue; int OpSrcValueOffset; @@ -5622,8 +5622,8 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDOperand OriginalChain, /// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking /// for a better chain (aliasing node.) -SDOperand DAGCombiner::FindBetterChain(SDNode *N, SDOperand OldChain) { - SmallVector<SDOperand, 8> Aliases; // Ops for replacing token factor. +SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) { + SmallVector<SDValue, 8> Aliases; // Ops for replacing token factor. // Accumulate all the aliases to this node. GatherAllAliases(N, OldChain, Aliases); @@ -5637,7 +5637,7 @@ SDOperand DAGCombiner::FindBetterChain(SDNode *N, SDOperand OldChain) { } // Construct a custom tailored token factor. - SDOperand NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, + SDValue NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Aliases[0], Aliases.size()); // Make sure the old chain gets cleaned up. diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index dfc74f3d03..12b7b4aff5 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -57,7 +57,7 @@ class VISIBILITY_HIDDEN SelectionDAGLegalize { /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been /// legalized. We use this to ensure that calls are properly serialized /// against each other, including inserted libcalls. - SDOperand LastCALLSEQ_END; + SDValue LastCALLSEQ_END; /// IsLegalizingCall - This member is used *only* for purposes of providing /// helpful assertions that a libcall isn't created while another call is @@ -78,35 +78,35 @@ class VISIBILITY_HIDDEN SelectionDAGLegalize { /// LegalizedNodes - For nodes that are of legal width, and that have more /// than one use, this map indicates what regularized operand to use. This /// allows us to avoid legalizing the same thing more than once. - DenseMap<SDOperand, SDOperand> LegalizedNodes; + DenseMap<SDValue, SDValue> LegalizedNodes; /// PromotedNodes - For nodes that are below legal width, and that have more /// than one use, this map indicates what promoted value to use. This allows /// us to avoid promoting the same thing more than once. - DenseMap<SDOperand, SDOperand> PromotedNodes; + DenseMap<SDValue, SDValue> PromotedNodes; /// ExpandedNodes - For nodes that need to be expanded this map indicates /// which which operands are the expanded version of the input. This allows /// us to avoid expanding the same node more than once. 
- DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > ExpandedNodes; + DenseMap<SDValue, std::pair<SDValue, SDValue> > ExpandedNodes; /// SplitNodes - For vector nodes that need to be split, this map indicates /// which which operands are the split version of the input. This allows us /// to avoid splitting the same node more than once. - std::map<SDOperand, std::pair<SDOperand, SDOperand> > SplitNodes; + std::map<SDValue, std::pair<SDValue, SDValue> > SplitNodes; /// ScalarizedNodes - For nodes that need to be converted from vector types to /// scalar types, this contains the mapping of ones we have already /// processed to the result. - std::map<SDOperand, SDOperand> ScalarizedNodes; + std::map<SDValue, SDValue> ScalarizedNodes; - void AddLegalizedOperand(SDOperand From, SDOperand To) { + void AddLegalizedOperand(SDValue From, SDValue To) { LegalizedNodes.insert(std::make_pair(From, To)); // If someone requests legalization of the new node, return itself. if (From != To) LegalizedNodes.insert(std::make_pair(To, To)); } - void AddPromotedOperand(SDOperand From, SDOperand To) { + void AddPromotedOperand(SDValue From, SDValue To) { bool isNew = PromotedNodes.insert(std::make_pair(From, To)).second; assert(isNew && "Got into the map somehow?"); // If someone requests legalization of the new node, return itself. @@ -134,49 +134,49 @@ public: private: /// HandleOp - Legalize, Promote, or Expand the specified operand as /// appropriate for its type. - void HandleOp(SDOperand Op); + void HandleOp(SDValue Op); /// LegalizeOp - We know that the specified value has a legal type. /// Recursively ensure that the operands have legal types, then return the /// result. - SDOperand LegalizeOp(SDOperand O); + SDValue LegalizeOp(SDValue O); /// UnrollVectorOp - We know that the given vector has a legal type, however /// the operation it performs is not legal and is an operation that we have /// no way of lowering. "Unroll" the vector, splitting out the scalars and /// operating on each element individually. - SDOperand UnrollVectorOp(SDOperand O); + SDValue UnrollVectorOp(SDValue O); /// PerformInsertVectorEltInMemory - Some target cannot handle a variable /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it /// is necessary to spill the vector being inserted into to memory, perform /// the insert there, and then read the result back. - SDOperand PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, - SDOperand Idx); + SDValue PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, + SDValue Idx); /// PromoteOp - Given an operation that produces a value in an invalid type, /// promote it to compute the value into a larger type. The produced value /// will have the correct bits for the low portion of the register, but no /// guarantee is made about the top bits: it may be zero, sign-extended, or /// garbage. - SDOperand PromoteOp(SDOperand O); + SDValue PromoteOp(SDValue O); - /// ExpandOp - Expand the specified SDOperand into its two component pieces + /// ExpandOp - Expand the specified SDValue into its two component pieces /// Lo&Hi. Note that the Op MUST be an expanded type. As a result of this, /// the LegalizeNodes map is filled in for any results that are not expanded, /// the ExpandedNodes map is filled in for any results that are expanded, and /// the Lo/Hi values are returned. This applies to integer types and Vector /// types. 
- void ExpandOp(SDOperand O, SDOperand &Lo, SDOperand &Hi); + void ExpandOp(SDValue O, SDValue &Lo, SDValue &Hi); /// SplitVectorOp - Given an operand of vector type, break it down into /// two smaller values. - void SplitVectorOp(SDOperand O, SDOperand &Lo, SDOperand &Hi); + void SplitVectorOp(SDValue O, SDValue &Lo, SDValue &Hi); /// ScalarizeVectorOp - Given an operand of single-element vector type /// (e.g. v1f32), convert it into the equivalent operation that returns a /// scalar (e.g. f32) value. - SDOperand ScalarizeVectorOp(SDOperand O); + SDValue ScalarizeVectorOp(SDValue O); /// isShuffleLegal - Return non-null if a vector shuffle is legal with the /// specified mask and type. Targets can specify exactly which masks they @@ -187,33 +187,33 @@ private: /// /// If this is a legal shuffle, this method returns the (possibly promoted) /// build_vector Mask. If it's not a legal shuffle, it returns null. - SDNode *isShuffleLegal(MVT VT, SDOperand Mask) const; + SDNode *isShuffleLegal(MVT VT, SDValue Mask) const; bool LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, SmallPtrSet<SDNode*, 32> &NodesLeadingTo); - void LegalizeSetCCOperands(SDOperand &LHS, SDOperand &RHS, SDOperand &CC); + void LegalizeSetCCOperands(SDValue &LHS, SDValue &RHS, SDValue &CC); - SDOperand ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned, - SDOperand &Hi); - SDOperand ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source); - - SDOperand EmitStackConvert(SDOperand SrcOp, MVT SlotVT, MVT DestVT); - SDOperand ExpandBUILD_VECTOR(SDNode *Node); - SDOperand ExpandSCALAR_TO_VECTOR(SDNode *Node); - SDOperand ExpandLegalINT_TO_FP(bool isSigned, SDOperand LegalOp, MVT DestVT); - SDOperand PromoteLegalINT_TO_FP(SDOperand LegalOp, MVT DestVT, bool isSigned); - SDOperand PromoteLegalFP_TO_INT(SDOperand LegalOp, MVT DestVT, bool isSigned); - - SDOperand ExpandBSWAP(SDOperand Op); - SDOperand ExpandBitCount(unsigned Opc, SDOperand Op); - bool ExpandShift(unsigned Opc, SDOperand Op, SDOperand Amt, - SDOperand &Lo, SDOperand &Hi); - void ExpandShiftParts(unsigned NodeOp, SDOperand Op, SDOperand Amt, - SDOperand &Lo, SDOperand &Hi); - - SDOperand ExpandEXTRACT_SUBVECTOR(SDOperand Op); - SDOperand ExpandEXTRACT_VECTOR_ELT(SDOperand Op); + SDValue ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, bool isSigned, + SDValue &Hi); + SDValue ExpandIntToFP(bool isSigned, MVT DestTy, SDValue Source); + + SDValue EmitStackConvert(SDValue SrcOp, MVT SlotVT, MVT DestVT); + SDValue ExpandBUILD_VECTOR(SDNode *Node); + SDValue ExpandSCALAR_TO_VECTOR(SDNode *Node); + SDValue ExpandLegalINT_TO_FP(bool isSigned, SDValue LegalOp, MVT DestVT); + SDValue PromoteLegalINT_TO_FP(SDValue LegalOp, MVT DestVT, bool isSigned); + SDValue PromoteLegalFP_TO_INT(SDValue LegalOp, MVT DestVT, bool isSigned); + + SDValue ExpandBSWAP(SDValue Op); + SDValue ExpandBitCount(unsigned Opc, SDValue Op); + bool ExpandShift(unsigned Opc, SDValue Op, SDValue Amt, + SDValue &Lo, SDValue &Hi); + void ExpandShiftParts(unsigned NodeOp, SDValue Op, SDValue Amt, + SDValue &Lo, SDValue &Hi); + + SDValue ExpandEXTRACT_SUBVECTOR(SDValue Op); + SDValue ExpandEXTRACT_VECTOR_ELT(SDValue Op); }; } @@ -223,7 +223,7 @@ private: /// /// Note that this will also return true for shuffles that are promoted to a /// different type. 
-SDNode *SelectionDAGLegalize::isShuffleLegal(MVT VT, SDOperand Mask) const { +SDNode *SelectionDAGLegalize::isShuffleLegal(MVT VT, SDValue Mask) const { switch (TLI.getOperationAction(ISD::VECTOR_SHUFFLE, VT)) { default: return 0; case TargetLowering::Legal: @@ -241,9 +241,9 @@ SDNode *SelectionDAGLegalize::isShuffleLegal(MVT VT, SDOperand Mask) const { assert(NumEltsGrowth && "Cannot promote to vector type with fewer elts!"); if (NumEltsGrowth > 1) { // Renumber the elements. - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = Mask.getNumOperands(); i != e; ++i) { - SDOperand InOp = Mask.getOperand(i); + SDValue InOp = Mask.getOperand(i); for (unsigned j = 0; j != NumEltsGrowth; ++j) { if (InOp.getOpcode() == ISD::UNDEF) Ops.push_back(DAG.getNode(ISD::UNDEF, EltVT)); @@ -322,10 +322,10 @@ void SelectionDAGLegalize::LegalizeDAG() { ComputeTopDownOrdering(DAG, Order); for (unsigned i = 0, e = Order.size(); i != e; ++i) - HandleOp(SDOperand(Order[i], 0)); + HandleOp(SDValue(Order[i], 0)); // Finally, it's possible the root changed. Get the new root. - SDOperand OldRoot = DAG.getRoot(); + SDValue OldRoot = DAG.getRoot(); assert(LegalizedNodes.count(OldRoot) && "Root didn't get legalized?"); DAG.setRoot(LegalizedNodes[OldRoot]); @@ -349,15 +349,15 @@ static SDNode *FindCallEndFromCallStart(SDNode *Node) { return 0; // No CallSeqEnd // The chain is usually at the end. - SDOperand TheChain(Node, Node->getNumValues()-1); + SDValue TheChain(Node, Node->getNumValues()-1); if (TheChain.getValueType() != MVT::Other) { // Sometimes it's at the beginning. - TheChain = SDOperand(Node, 0); + TheChain = SDValue(Node, 0); if (TheChain.getValueType() != MVT::Other) { // Otherwise, hunt for it. for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i) if (Node->getValueType(i) == MVT::Other) { - TheChain = SDOperand(Node, i); + TheChain = SDValue(Node, i); break; } @@ -410,13 +410,13 @@ bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, // reach N. switch (getTypeAction(N->getValueType(0))) { case Legal: - if (LegalizedNodes.count(SDOperand(N, 0))) return false; + if (LegalizedNodes.count(SDValue(N, 0))) return false; break; case Promote: - if (PromotedNodes.count(SDOperand(N, 0))) return false; + if (PromotedNodes.count(SDValue(N, 0))) return false; break; case Expand: - if (ExpandedNodes.count(SDOperand(N, 0))) return false; + if (ExpandedNodes.count(SDValue(N, 0))) return false; break; } @@ -433,13 +433,13 @@ bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode *N, SDNode *Dest, } // Okay, this node looks safe, legalize it and return false. - HandleOp(SDOperand(N, 0)); + HandleOp(SDValue(N, 0)); return false; } /// HandleOp - Legalize, Promote, or Expand the specified operand as /// appropriate for its type. -void SelectionDAGLegalize::HandleOp(SDOperand Op) { +void SelectionDAGLegalize::HandleOp(SDValue Op) { MVT VT = Op.getValueType(); switch (getTypeAction(VT)) { default: assert(0 && "Bad type action!"); @@ -449,7 +449,7 @@ void SelectionDAGLegalize::HandleOp(SDOperand Op) { if (!VT.isVector()) { // If this is an illegal scalar, expand it into its two component // pieces. - SDOperand X, Y; + SDValue X, Y; if (Op.getOpcode() == ISD::TargetConstant) break; // Allow illegal target nodes. ExpandOp(Op, X, Y); @@ -460,7 +460,7 @@ void SelectionDAGLegalize::HandleOp(SDOperand Op) { } else { // Otherwise, this is an illegal multiple element vector. // Split it in half and legalize both parts. 
- SDOperand X, Y; + SDValue X, Y; SplitVectorOp(Op, X, Y); } break; @@ -469,7 +469,7 @@ void SelectionDAGLegalize::HandleOp(SDOperand Op) { /// ExpandConstantFP - Expands the ConstantFP node to an integer constant or /// a load from the constant pool. -static SDOperand ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, +static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, SelectionDAG &DAG, TargetLowering &TLI) { bool Extend = false; @@ -504,7 +504,7 @@ static SDOperand ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, } } - SDOperand CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); + SDValue CPIdx = DAG.getConstantPool(LLVMC, TLI.getPointerTy()); if (Extend) return DAG.getExtLoad(ISD::EXTLOAD, OrigVT, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), @@ -517,8 +517,8 @@ static SDOperand ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP, /// ExpandFCOPYSIGNToBitwiseOps - Expands fcopysign to a series of bitwise /// operations. static -SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT NVT, - SelectionDAG &DAG, TargetLowering &TLI) { +SDValue ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT NVT, + SelectionDAG &DAG, TargetLowering &TLI) { MVT VT = Node->getValueType(0); MVT SrcVT = Node->getOperand(1).getValueType(); assert((SrcVT == MVT::f32 || SrcVT == MVT::f64) && @@ -526,11 +526,11 @@ SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT NVT, MVT SrcNVT = (SrcVT == MVT::f64) ? MVT::i64 : MVT::i32; // First get the sign bit of second operand. - SDOperand Mask1 = (SrcVT == MVT::f64) + SDValue Mask1 = (SrcVT == MVT::f64) ? DAG.getConstantFP(BitsToDouble(1ULL << 63), SrcVT) : DAG.getConstantFP(BitsToFloat(1U << 31), SrcVT); Mask1 = DAG.getNode(ISD::BIT_CONVERT, SrcNVT, Mask1); - SDOperand SignBit= DAG.getNode(ISD::BIT_CONVERT, SrcNVT, Node->getOperand(1)); + SDValue SignBit= DAG.getNode(ISD::BIT_CONVERT, SrcNVT, Node->getOperand(1)); SignBit = DAG.getNode(ISD::AND, SrcNVT, SignBit, Mask1); // Shift right or sign-extend it if the two operands have different types. int SizeDiff = SrcNVT.getSizeInBits() - NVT.getSizeInBits(); @@ -545,11 +545,11 @@ SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT NVT, } // Clear the sign bit of first operand. - SDOperand Mask2 = (VT == MVT::f64) + SDValue Mask2 = (VT == MVT::f64) ? DAG.getConstantFP(BitsToDouble(~(1ULL << 63)), VT) : DAG.getConstantFP(BitsToFloat(~(1U << 31)), VT); Mask2 = DAG.getNode(ISD::BIT_CONVERT, NVT, Mask2); - SDOperand Result = DAG.getNode(ISD::BIT_CONVERT, NVT, Node->getOperand(0)); + SDValue Result = DAG.getNode(ISD::BIT_CONVERT, NVT, Node->getOperand(0)); Result = DAG.getNode(ISD::AND, NVT, Result, Mask2); // Or the value with the sign bit. @@ -559,11 +559,11 @@ SDOperand ExpandFCOPYSIGNToBitwiseOps(SDNode *Node, MVT NVT, /// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores. 
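Aside: ExpandFCOPYSIGNToBitwiseOps above is the classic bit-level copysign: isolate the sign bit of the second operand, clear the sign bit of the first, and OR the two. A standalone f32-only sketch (hypothetical helper, not LLVM's API):

#include <cstdint>
#include <cstring>

float CopySignF32(float X, float Y) {
  uint32_t XI, YI;
  std::memcpy(&XI, &X, 4);                         // bit_convert f32 -> i32
  std::memcpy(&YI, &Y, 4);
  uint32_t SignBit = YI & (1u << 31);              // sign of the second operand
  uint32_t Result = (XI & ~(1u << 31)) | SignBit;  // clear sign, then or it in
  float Out;
  std::memcpy(&Out, &Result, 4);
  return Out;                                      // e.g. CopySignF32(2.5f, -0.0f) == -2.5f
}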
static -SDOperand ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, - TargetLowering &TLI) { - SDOperand Chain = ST->getChain(); - SDOperand Ptr = ST->getBasePtr(); - SDOperand Val = ST->getValue(); +SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, + TargetLowering &TLI) { + SDValue Chain = ST->getChain(); + SDValue Ptr = ST->getBasePtr(); + SDValue Val = ST->getValue(); MVT VT = Val.getValueType(); int Alignment = ST->getAlignment(); int SVOffset = ST->getSrcValueOffset(); @@ -581,7 +581,7 @@ SDOperand ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, else assert(0 && "Unaligned store of unsupported type"); - SDOperand Result = DAG.getNode(ISD::BIT_CONVERT, intVT, Val); + SDValue Result = DAG.getNode(ISD::BIT_CONVERT, intVT, Val); return DAG.getStore(Chain, Result, Ptr, ST->getSrcValue(), SVOffset, ST->isVolatile(), Alignment); } @@ -595,12 +595,12 @@ SDOperand ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, int IncrementSize = NumBits / 8; // Divide the stored value in two parts. - SDOperand ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy()); - SDOperand Lo = Val; - SDOperand Hi = DAG.getNode(ISD::SRL, VT, Val, ShiftAmount); + SDValue ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy()); + SDValue Lo = Val; + SDValue Hi = DAG.getNode(ISD::SRL, VT, Val, ShiftAmount); // Store the two parts - SDOperand Store1, Store2; + SDValue Store1, Store2; Store1 = DAG.getTruncStore(Chain, TLI.isLittleEndian()?Lo:Hi, Ptr, ST->getSrcValue(), SVOffset, NewStoredVT, ST->isVolatile(), Alignment); @@ -616,11 +616,11 @@ SDOperand ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG, /// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads. static -SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, - TargetLowering &TLI) { +SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, + TargetLowering &TLI) { int SVOffset = LD->getSrcValueOffset(); - SDOperand Chain = LD->getChain(); - SDOperand Ptr = LD->getBasePtr(); + SDValue Chain = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); MVT VT = LD->getValueType(0); MVT LoadedVT = LD->getMemoryVT(); if (VT.isFloatingPoint() || VT.isVector()) { @@ -637,14 +637,14 @@ SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, else assert(0 && "Unaligned load of unsupported type"); - SDOperand newLoad = DAG.getLoad(intVT, Chain, Ptr, LD->getSrcValue(), + SDValue newLoad = DAG.getLoad(intVT, Chain, Ptr, LD->getSrcValue(), SVOffset, LD->isVolatile(), LD->getAlignment()); - SDOperand Result = DAG.getNode(ISD::BIT_CONVERT, LoadedVT, newLoad); + SDValue Result = DAG.getNode(ISD::BIT_CONVERT, LoadedVT, newLoad); if (VT.isFloatingPoint() && LoadedVT != VT) Result = DAG.getNode(ISD::FP_EXTEND, VT, Result); - SDOperand Ops[] = { Result, Chain }; + SDValue Ops[] = { Result, Chain }; return DAG.getMergeValues(Ops, 2); } assert(LoadedVT.isInteger() && !LoadedVT.isVector() && @@ -666,7 +666,7 @@ SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, HiExtType = ISD::ZEXTLOAD; // Load the value in two parts - SDOperand Lo, Hi; + SDValue Lo, Hi; if (TLI.isLittleEndian()) { Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, Chain, Ptr, LD->getSrcValue(), SVOffset, NewLoadedVT, LD->isVolatile(), Alignment); @@ -686,14 +686,14 @@ SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, } // aggregate the two parts - SDOperand ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy()); - SDOperand Result = DAG.getNode(ISD::SHL, VT, Hi, ShiftAmount); + SDValue ShiftAmount = 
DAG.getConstant(NumBits, TLI.getShiftAmountTy()); + SDValue Result = DAG.getNode(ISD::SHL, VT, Hi, ShiftAmount); Result = DAG.getNode(ISD::OR, VT, Result, Lo); - SDOperand TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1), + SDValue TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1), Hi.getValue(1)); - SDOperand Ops[] = { Result, TF }; + SDValue Ops[] = { Result, TF }; return DAG.getMergeValues(Ops, 2); } @@ -701,7 +701,7 @@ SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG, /// the operation it performs is not legal and is an operation that we have /// no way of lowering. "Unroll" the vector, splitting out the scalars and /// operating on each element individually. -SDOperand SelectionDAGLegalize::UnrollVectorOp(SDOperand Op) { +SDValue SelectionDAGLegalize::UnrollVectorOp(SDValue Op) { MVT VT = Op.getValueType(); assert(isTypeLegal(VT) && "Caller should expand or promote operands that are not legal!"); @@ -710,11 +710,11 @@ SDOperand SelectionDAGLegalize::UnrollVectorOp(SDOperand Op) { unsigned NE = VT.getVectorNumElements(); MVT EltVT = VT.getVectorElementType(); - SmallVector<SDOperand, 8> Scalars; - SmallVector<SDOperand, 4> Operands(Op.getNumOperands()); + SmallVector<SDValue, 8> Scalars; + SmallVector<SDValue, 4> Operands(Op.getNumOperands()); for (unsigned i = 0; i != NE; ++i) { for (unsigned j = 0; j != Op.getNumOperands(); ++j) { - SDOperand Operand = Op.getOperand(j); + SDValue Operand = Op.getOperand(j); MVT OperandVT = Operand.getValueType(); if (OperandVT.isVector()) { // A vector operand; extract a single element. @@ -753,11 +753,11 @@ static RTLIB::Libcall GetFPLibCall(MVT VT, /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it /// is necessary to spill the vector being inserted into to memory, perform /// the insert there, and then read the result back. -SDOperand SelectionDAGLegalize:: -PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, SDOperand Idx) { - SDOperand Tmp1 = Vec; - SDOperand Tmp2 = Val; - SDOperand Tmp3 = Idx; +SDValue SelectionDAGLegalize:: +PerformInsertVectorEltInMemory(SDValue Vec, SDValue Val, SDValue Idx) { + SDValue Tmp1 = Vec; + SDValue Tmp2 = Val; + SDValue Tmp3 = Idx; // If the target doesn't support this, we have to spill the input vector // to a temporary stack slot, update the element, then reload it. This is @@ -769,12 +769,12 @@ PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, SDOperand Idx) { MVT EltVT = VT.getVectorElementType(); MVT IdxVT = Tmp3.getValueType(); MVT PtrVT = TLI.getPointerTy(); - SDOperand StackPtr = DAG.CreateStackTemporary(VT); + SDValue StackPtr = DAG.CreateStackTemporary(VT); int SPFI = cast<FrameIndexSDNode>(StackPtr.Val)->getIndex(); // Store the vector. - SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Tmp1, StackPtr, + SDValue Ch = DAG.getStore(DAG.getEntryNode(), Tmp1, StackPtr, PseudoSourceValue::getFixedStack(SPFI), 0); // Truncate or zero extend offset to target pointer type. @@ -783,7 +783,7 @@ PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, SDOperand Idx) { // Add the offset to the index. unsigned EltSize = EltVT.getSizeInBits()/8; Tmp3 = DAG.getNode(ISD::MUL, IdxVT, Tmp3,DAG.getConstant(EltSize, IdxVT)); - SDOperand StackPtr2 = DAG.getNode(ISD::ADD, IdxVT, Tmp3, StackPtr); + SDValue StackPtr2 = DAG.getNode(ISD::ADD, IdxVT, Tmp3, StackPtr); // Store the scalar value. 
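Aside: PerformInsertVectorEltInMemory, shown here, spills the vector to a stack temporary, stores the scalar at slot + index*eltsize, and reads the updated vector back (the reload follows in the part of the function beyond this hunk). A standalone sketch of the same sequence (hypothetical helper, not LLVM's API):

#include <cstring>

void InsertEltInMemory(const float *Vec, unsigned NumElts, float Val,
                       unsigned Idx, float *Slot, float *Result) {
  unsigned EltSize = sizeof(float);
  std::memcpy(Slot, Vec, NumElts * EltSize);       // store the vector to the slot
  char *ElemAddr = reinterpret_cast<char *>(Slot) + Idx * EltSize;
  std::memcpy(ElemAddr, &Val, EltSize);            // store the scalar at slot + idx*eltsize
  std::memcpy(Result, Slot, NumElts * EltSize);    // reload the updated vector
}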
Ch = DAG.getTruncStore(Ch, Tmp2, StackPtr2, PseudoSourceValue::getFixedStack(SPFI), 0, EltVT); @@ -796,7 +796,7 @@ PerformInsertVectorEltInMemory(SDOperand Vec, SDOperand Val, SDOperand Idx) { /// that its operands are legal. Now ensure that the operation itself /// is legal, recursively ensuring that the operands' operations remain /// legal. -SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { +SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) { if (Op.getOpcode() == ISD::TargetConstant) // Allow illegal target nodes. return Op; @@ -818,11 +818,11 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Note that LegalizeOp may be reentered even from single-use nodes, which // means that we always must cache transformed nodes. - DenseMap<SDOperand, SDOperand>::iterator I = LegalizedNodes.find(Op); + DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op); if (I != LegalizedNodes.end()) return I->second; - SDOperand Tmp1, Tmp2, Tmp3, Tmp4; - SDOperand Result = Op; + SDValue Tmp1, Tmp2, Tmp3, Tmp4; + SDValue Result = Op; bool isCustom = false; switch (Node->getOpcode()) { @@ -851,7 +851,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Node->getOpcode() >= ISD::BUILTIN_OP_END) { // If this is a target node, legalize it by legalizing the operands then // passing it through. - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) Ops.push_back(LegalizeOp(Node->getOperand(i))); @@ -922,7 +922,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Result.Val) break; // Fall Thru case TargetLowering::Legal: { - SDOperand Ops[] = { DAG.getConstant(0, VT), Tmp1 }; + SDValue Ops[] = { DAG.getConstant(0, VT), Tmp1 }; Result = DAG.getMergeValues(Ops, 2); break; } @@ -956,7 +956,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Result.Val) break; // Fall Thru case TargetLowering::Legal: { - SDOperand Ops[] = { DAG.getConstant(0, VT), Tmp2 }; + SDValue Ops[] = { DAG.getConstant(0, VT), Tmp2 }; Result = DAG.getMergeValues(Ops, 2); break; } @@ -1041,7 +1041,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::INTRINSIC_W_CHAIN: case ISD::INTRINSIC_WO_CHAIN: case ISD::INTRINSIC_VOID: { - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) Ops.push_back(LegalizeOp(Node->getOperand(i))); Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); @@ -1061,8 +1061,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Since loads produce two values, make sure to remember that we // legalized both of them. 
- AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result.getValue(Op.ResNo); } @@ -1087,7 +1087,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { unsigned Col = DSP->getColumn(); if (useDEBUG_LOC) { - SDOperand Ops[] = { Tmp1, DAG.getConstant(Line, MVT::i32), + SDValue Ops[] = { Tmp1, DAG.getConstant(Line, MVT::i32), DAG.getConstant(Col, MVT::i32), DAG.getConstant(SrcFile, MVT::i32) }; Result = DAG.getNode(ISD::DEBUG_LOC, MVT::Other, Ops, 4); @@ -1105,7 +1105,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Action == Legal && Tmp1 == Node->getOperand(0)) break; - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.push_back(Tmp1); if (Action == Legal) { Ops.push_back(Node->getOperand(1)); // line # must be legal. @@ -1201,7 +1201,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { switch (TLI.getOperationAction(ISD::MEMBARRIER, MVT::Other)) { default: assert(0 && "This action is not supported yet!"); case TargetLowering::Legal: { - SDOperand Ops[6]; + SDValue Ops[6]; Ops[0] = LegalizeOp(Node->getOperand(0)); // Legalize the chain. for (int x = 1; x < 6; ++x) { Ops[x] = Node->getOperand(x); @@ -1222,7 +1222,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::ATOMIC_CMP_SWAP: { unsigned int num_operands = 4; assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!"); - SDOperand Ops[4]; + SDValue Ops[4]; for (unsigned int x = 0; x < num_operands; ++x) Ops[x] = LegalizeOp(Node->getOperand(x)); Result = DAG.UpdateNodeOperands(Result, &Ops[0], num_operands); @@ -1235,8 +1235,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case TargetLowering::Legal: break; } - AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result.getValue(Op.ResNo); } case ISD::ATOMIC_LOAD_ADD: @@ -1252,7 +1252,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::ATOMIC_SWAP: { unsigned int num_operands = 3; assert(Node->getNumOperands() == num_operands && "Invalid Atomic node!"); - SDOperand Ops[3]; + SDValue Ops[3]; for (unsigned int x = 0; x < num_operands; ++x) Ops[x] = LegalizeOp(Node->getOperand(x)); Result = DAG.UpdateNodeOperands(Result, &Ops[0], num_operands); @@ -1263,13 +1263,13 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = TLI.LowerOperation(Result, DAG); break; case TargetLowering::Expand: - Result = SDOperand(TLI.ReplaceNodeResults(Op.Val, DAG),0); + Result = SDValue(TLI.ReplaceNodeResults(Op.Val, DAG),0); break; case TargetLowering::Legal: break; } - AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result.getValue(Op.ResNo); } case ISD::Constant: { @@ -1334,7 +1334,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp3 = LegalizeOp(Node->getOperand(2)); Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, Tmp3); } else { - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; // Legalize the operands. 
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) Ops.push_back(LegalizeOp(Node->getOperand(i))); @@ -1371,7 +1371,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp1 = LegalizeOp(Tmp3.getValue(i)); if (Op.ResNo == i) Tmp2 = Tmp1; - AddLegalizedOperand(SDOperand(Node, i), Tmp1); + AddLegalizedOperand(SDValue(Node, i), Tmp1); } return Tmp2; case ISD::EXTRACT_SUBREG: { @@ -1440,7 +1440,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // match the element type of the vector being created. if (Tmp2.getValueType() == Op.getValueType().getVectorElementType()) { - SDOperand ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, + SDValue ScVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, Tmp1.getValueType(), Tmp2); unsigned NumElts = Tmp1.getValueType().getVectorNumElements(); @@ -1451,14 +1451,14 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // We generate a shuffle of InVec and ScVec, so the shuffle mask // should be 0,1,2,3,4,5... with the appropriate element replaced with // elt 0 of the RHS. - SmallVector<SDOperand, 8> ShufOps; + SmallVector<SDValue, 8> ShufOps; for (unsigned i = 0; i != NumElts; ++i) { if (i != InsertPos->getValue()) ShufOps.push_back(DAG.getConstant(i, ShufMaskEltVT)); else ShufOps.push_back(DAG.getConstant(NumElts, ShufMaskEltVT)); } - SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, ShufMaskVT, + SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, ShufMaskVT, &ShufOps[0], ShufOps.size()); Result = DAG.getNode(ISD::VECTOR_SHUFFLE, Tmp1.getValueType(), @@ -1520,11 +1520,11 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { MVT VT = Node->getValueType(0); MVT EltVT = VT.getVectorElementType(); MVT PtrVT = TLI.getPointerTy(); - SDOperand Mask = Node->getOperand(2); + SDValue Mask = Node->getOperand(2); unsigned NumElems = Mask.getNumOperands(); - SmallVector<SDOperand,8> Ops; + SmallVector<SDValue,8> Ops; for (unsigned i = 0; i != NumElems; ++i) { - SDOperand Arg = Mask.getOperand(i); + SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) { Ops.push_back(DAG.getNode(ISD::UNDEF, EltVT)); } else { @@ -1551,7 +1551,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp2 = DAG.getNode(ISD::BIT_CONVERT, NVT, Tmp2); // Convert the shuffle mask to the right # elements. - Tmp3 = SDOperand(isShuffleLegal(OVT, Node->getOperand(2)), 0); + Tmp3 = SDValue(isShuffleLegal(OVT, Node->getOperand(2)), 0); assert(Tmp3.Val && "Shuffle not legal?"); Result = DAG.getNode(ISD::VECTOR_SHUFFLE, NVT, Tmp1, Tmp2, Tmp3); Result = DAG.getNode(ISD::BIT_CONVERT, OVT, Result); @@ -1599,7 +1599,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Do not try to legalize the target-specific arguments (#1+). if (Tmp1 != Node->getOperand(0)) { - SmallVector<SDOperand, 8> Ops(Node->op_begin(), Node->op_end()); + SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); Ops[0] = Tmp1; Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); } @@ -1615,7 +1615,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // can overlap. assert(!IsLegalizingCall && "Inconsistent sequentialization of calls!"); // Note that we are selecting this call! - LastCALLSEQ_END = SDOperand(CallEnd, 0); + LastCALLSEQ_END = SDValue(CallEnd, 0); IsLegalizingCall = true; // Legalize the call, starting from the CALLSEQ_END. @@ -1627,8 +1627,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // If the CALLSEQ_START node hasn't been legalized first, legalize it. 
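Aside: the ShufOps loop above builds an identity mask with the insertion slot redirected to element NumElts, i.e. lane 0 of the SCALAR_TO_VECTOR operand in a two-input shuffle. A standalone sketch of that mask construction (hypothetical helper, not LLVM's API):

#include <vector>

std::vector<unsigned> InsertEltShuffleMask(unsigned NumElts,
                                           unsigned InsertPos) {
  std::vector<unsigned> ShufOps;
  for (unsigned i = 0; i != NumElts; ++i)
    ShufOps.push_back(i != InsertPos ? i : NumElts);  // identity except the slot
  return ShufOps;  // e.g. NumElts=4, InsertPos=2 -> {0, 1, 4, 3}
}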
This // will cause this node to be legalized as well as handling libcalls right. if (LastCALLSEQ_END.Val != Node) { - LegalizeOp(SDOperand(FindCallStartFromCallEnd(Node), 0)); - DenseMap<SDOperand, SDOperand>::iterator I = LegalizedNodes.find(Op); + LegalizeOp(SDValue(FindCallStartFromCallEnd(Node), 0)); + DenseMap<SDValue, SDValue>::iterator I = LegalizedNodes.find(Op); assert(I != LegalizedNodes.end() && "Legalizing the call start should have legalized this node!"); return I->second; @@ -1641,7 +1641,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // an optional flag input. if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Flag){ if (Tmp1 != Node->getOperand(0)) { - SmallVector<SDOperand, 8> Ops(Node->op_begin(), Node->op_end()); + SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); Ops[0] = Tmp1; Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); } @@ -1649,7 +1649,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp2 = LegalizeOp(Node->getOperand(Node->getNumOperands()-1)); if (Tmp1 != Node->getOperand(0) || Tmp2 != Node->getOperand(Node->getNumOperands()-1)) { - SmallVector<SDOperand, 8> Ops(Node->op_begin(), Node->op_end()); + SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); Ops[0] = Tmp1; Ops.back() = Tmp2; Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); @@ -1660,9 +1660,9 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { IsLegalizingCall = false; // If the CALLSEQ_END node has a flag, remember that we legalized it. - AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); if (Node->getNumValues() == 2) - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result.getValue(Op.ResNo); case ISD::DYNAMIC_STACKALLOC: { MVT VT = Node->getValueType(0); @@ -1679,15 +1679,15 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" " not tell us which reg is the stack pointer!"); - SDOperand Chain = Tmp1.getOperand(0); + SDValue Chain = Tmp1.getOperand(0); // Chain the dynamic stack allocation so that it doesn't modify the stack // pointer when other instructions are using the stack. Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(0, TLI.getPointerTy())); - SDOperand Size = Tmp2.getOperand(1); - SDOperand SP = DAG.getCopyFromReg(Chain, SPReg, VT); + SDValue Size = Tmp2.getOperand(1); + SDValue SP = DAG.getCopyFromReg(Chain, SPReg, VT); Chain = SP.getValue(1); unsigned Align = cast<ConstantSDNode>(Tmp3)->getValue(); unsigned StackAlign = @@ -1702,7 +1702,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { DAG.getCALLSEQ_END(Chain, DAG.getConstant(0, TLI.getPointerTy()), DAG.getConstant(0, TLI.getPointerTy()), - SDOperand()); + SDValue()); Tmp1 = LegalizeOp(Tmp1); Tmp2 = LegalizeOp(Tmp2); @@ -1720,17 +1720,17 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } // Since this op produce two values, make sure to remember that we // legalized both of them. - AddLegalizedOperand(SDOperand(Node, 0), Tmp1); - AddLegalizedOperand(SDOperand(Node, 1), Tmp2); + AddLegalizedOperand(SDValue(Node, 0), Tmp1); + AddLegalizedOperand(SDValue(Node, 1), Tmp2); return Op.ResNo ? 
Tmp2 : Tmp1; } case ISD::INLINEASM: { - SmallVector<SDOperand, 8> Ops(Node->op_begin(), Node->op_end()); + SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end()); bool Changed = false; // Legalize all of the operands of the inline asm, in case they are nodes // that need to be expanded or something. Note we skip the asm string and // all of the TargetConstant flags. - SDOperand Op = LegalizeOp(Ops[0]); + SDValue Op = LegalizeOp(Ops[0]); Changed = Op != Ops[0]; Ops[0] = Op; @@ -1738,7 +1738,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { for (unsigned i = 2, e = Ops.size()-HasInFlag; i < e; ) { unsigned NumVals = cast<ConstantSDNode>(Ops[i])->getValue() >> 3; for (++i; NumVals; ++i, --NumVals) { - SDOperand Op = LegalizeOp(Ops[i]); + SDValue Op = LegalizeOp(Ops[i]); if (Op != Ops[i]) { Changed = true; Ops[i] = Op; @@ -1756,8 +1756,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.UpdateNodeOperands(Result, &Ops[0], Ops.size()); // INLINE asm returns a chain and flag, make sure to add both to the map. - AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result.getValue(Op.ResNo); } case ISD::BR: @@ -1802,17 +1802,17 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { if (Tmp1.Val) Result = Tmp1; break; case TargetLowering::Expand: { - SDOperand Chain = Result.getOperand(0); - SDOperand Table = Result.getOperand(1); - SDOperand Index = Result.getOperand(2); + SDValue Chain = Result.getOperand(0); + SDValue Table = Result.getOperand(1); + SDValue Index = Result.getOperand(2); MVT PTy = TLI.getPointerTy(); MachineFunction &MF = DAG.getMachineFunction(); unsigned EntrySize = MF.getJumpTableInfo()->getEntrySize(); Index= DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(EntrySize, PTy)); - SDOperand Addr = DAG.getNode(ISD::ADD, PTy, Index, Table); + SDValue Addr = DAG.getNode(ISD::ADD, PTy, Index, Table); - SDOperand LD; + SDValue LD; switch (EntrySize) { default: assert(0 && "Size of jump table not supported yet."); break; case 4: LD = DAG.getLoad(MVT::i32, Chain, Addr, @@ -1971,8 +1971,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } // Since loads produce two values, make sure to remember that we // legalized both of them. - AddLegalizedOperand(SDOperand(Node, 0), Tmp3); - AddLegalizedOperand(SDOperand(Node, 1), Tmp4); + AddLegalizedOperand(SDValue(Node, 0), Tmp3); + AddLegalizedOperand(SDValue(Node, 1), Tmp4); return Op.ResNo ? Tmp4 : Tmp3; } else { MVT SrcVT = LD->getMemoryVT(); @@ -1995,7 +1995,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24. unsigned NewWidth = SrcVT.getStoreSizeInBits(); MVT NVT = MVT::getIntegerVT(NewWidth); - SDOperand Ch; + SDValue Ch; // The extra bits are guaranteed to be zero, since we stored them that // way. A zext load from NVT thus automatically gives zext from SrcVT. 
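Side note: the BR_JT expansion above computes the jump-table entry address as Table + Index * EntrySize and loads the branch target from it (the visible case handles 4-byte entries). A plain C++ model of that address arithmetic, assuming for illustration that entries are absolute 32-bit values:

```cpp
#include <cstdint>
#include <cstdio>

// Scalar model of the BR_JT expansion: Addr = ADD(MUL(Index, EntrySize), Table),
// then a 4-byte load of the target (the EntrySize == 4 case in the hunk).
uint32_t LoadJumpTableEntry(const uint32_t *Table, unsigned Index) {
  const char *Base = reinterpret_cast<const char *>(Table);
  unsigned EntrySize = sizeof(uint32_t);
  const char *Addr = Base + Index * EntrySize;
  return *reinterpret_cast<const uint32_t *>(Addr);
}

int main() {
  uint32_t Table[] = {0x100, 0x140, 0x180};
  std::printf("0x%x\n", LoadJumpTableEntry(Table, 1));  // 0x140
}
```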
@@ -2032,7 +2032,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { "Load size not an integral number of bytes!"); MVT RoundVT = MVT::getIntegerVT(RoundWidth); MVT ExtraVT = MVT::getIntegerVT(ExtraWidth); - SDOperand Lo, Hi, Ch; + SDValue Lo, Hi, Ch; unsigned IncrementSize; if (TLI.isLittleEndian()) { @@ -2131,7 +2131,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case TargetLowering::Expand: // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) { - SDOperand Load = DAG.getLoad(SrcVT, Tmp1, Tmp2, LD->getSrcValue(), + SDValue Load = DAG.getLoad(SrcVT, Tmp1, Tmp2, LD->getSrcValue(), LD->getSrcValueOffset(), LD->isVolatile(), LD->getAlignment()); Result = DAG.getNode(ISD::FP_EXTEND, Node->getValueType(0), Load); @@ -2146,7 +2146,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp1, Tmp2, LD->getSrcValue(), LD->getSrcValueOffset(), SrcVT, LD->isVolatile(), LD->getAlignment()); - SDOperand ValRes; + SDValue ValRes; if (ExtType == ISD::SEXTLOAD) ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, Result.getValueType(), Result, DAG.getValueType(SrcVT)); @@ -2160,8 +2160,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Since loads produce two values, make sure to remember that we legalized // both of them. - AddLegalizedOperand(SDOperand(Node, 0), Tmp1); - AddLegalizedOperand(SDOperand(Node, 1), Tmp2); + AddLegalizedOperand(SDValue(Node, 0), Tmp1); + AddLegalizedOperand(SDValue(Node, 1), Tmp2); return Op.ResNo ? Tmp2 : Tmp1; } } @@ -2215,8 +2215,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Since this produces two values, make sure to remember that we legalized // both of them. - AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result; } break; @@ -2239,7 +2239,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case Expand: if (!Tmp2.getValueType().isVector()) { - SDOperand Lo, Hi; + SDValue Lo, Hi; ExpandOp(Tmp2, Lo, Hi); // Big endian systems want the hi reg first. @@ -2279,7 +2279,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } else { // FIXME: Returns of gcc generic vectors larger than a legal vector // type should be returned by reference! - SDOperand Lo, Hi; + SDValue Lo, Hi; SplitVectorOp(Tmp2, Lo, Hi); Result = DAG.getNode(ISD::RET, MVT::Other, Tmp1, Lo, Tmp3, Hi,Tmp3); Result = LegalizeOp(Result); @@ -2297,7 +2297,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.UpdateNodeOperands(Result, Tmp1); break; default: { // ret <values> - SmallVector<SDOperand, 8> NewValues; + SmallVector<SDValue, 8> NewValues; NewValues.push_back(Tmp1); for (unsigned i = 1, e = Node->getNumOperands(); i < e; i += 2) switch (getTypeAction(Node->getOperand(i).getValueType())) { @@ -2306,7 +2306,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { NewValues.push_back(Node->getOperand(i+1)); break; case Expand: { - SDOperand Lo, Hi; + SDValue Lo, Hi; assert(!Node->getOperand(i).getValueType().isExtended() && "FIXME: TODO: implement returning non-legal vector types!"); ExpandOp(Node->getOperand(i), Lo, Hi); @@ -2380,8 +2380,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // stores. If the target supports neither 32- nor 64-bits, this // xform is certainly not worth it. 
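Side note: the RoundWidth/ExtraWidth hunk above splits an extending load whose memory type is not a round number of bytes worth of power-of-two bits into a load of the "round" part plus a load of the "extra" part, with the second pointer advanced by IncrementSize and the two pieces shifted and OR'd back together. The exact shift/OR nodes are elided in this diff, so the following is an illustration rather than a transcription: a little-endian sketch of a 24-bit zero-extending load built from an i16 and an i8 load.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Little-endian model: load the low 16 bits (the round part), then the high
// 8 bits (the extra part) from Ptr + IncrementSize, and recombine.
uint32_t ZExtLoad24(const unsigned char *Ptr) {
  uint16_t Lo;
  uint8_t Hi;
  std::memcpy(&Lo, Ptr, sizeof(Lo));               // round part
  std::memcpy(&Hi, Ptr + sizeof(Lo), sizeof(Hi));  // extra part, IncrementSize = 2
  return uint32_t(Lo) | (uint32_t(Hi) << 16);
}

int main() {
  unsigned char Buf[3] = {0x78, 0x56, 0x34};
  std::printf("0x%x\n", ZExtLoad24(Buf));  // 0x345678 on a little-endian host
}
```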
const APInt &IntVal =CFP->getValueAPF().convertToAPInt(); - SDOperand Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32); - SDOperand Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); + SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32); + SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32); if (TLI.isBigEndian()) std::swap(Lo, Hi); Lo = DAG.getStore(Tmp1, Lo, Tmp2, ST->getSrcValue(), @@ -2442,7 +2442,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case Expand: unsigned IncrementSize = 0; - SDOperand Lo, Hi; + SDValue Lo, Hi; // If this is a vector type, then we have to calculate the increment as // the product of the element size in bytes, and the number of elements @@ -2546,7 +2546,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { "Store size not an integral number of bytes!"); MVT RoundVT = MVT::getIntegerVT(RoundWidth); MVT ExtraVT = MVT::getIntegerVT(ExtraWidth); - SDOperand Lo, Hi; + SDValue Lo, Hi; unsigned IncrementSize; if (TLI.isLittleEndian()) { @@ -2655,8 +2655,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Since stacksave produce two values, make sure to remember that we // legalized both of them. - AddLegalizedOperand(SDOperand(Node, 0), Tmp1); - AddLegalizedOperand(SDOperand(Node, 1), Tmp2); + AddLegalizedOperand(SDValue(Node, 0), Tmp1); + AddLegalizedOperand(SDValue(Node, 1), Tmp2); return Op.ResNo ? Tmp2 : Tmp1; case ISD::STACKRESTORE: @@ -2702,8 +2702,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Since rdcc produce two values, make sure to remember that we legalized // both of them. - AddLegalizedOperand(SDOperand(Node, 0), Tmp1); - AddLegalizedOperand(SDOperand(Node, 1), Tmp2); + AddLegalizedOperand(SDValue(Node, 0), Tmp1); + AddLegalizedOperand(SDValue(Node, 1), Tmp2); return Result; case ISD::SELECT: @@ -2779,7 +2779,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Tmp2 = Node->getOperand(1); // RHS Tmp3 = LegalizeOp(Node->getOperand(2)); // True Tmp4 = LegalizeOp(Node->getOperand(3)); // False - SDOperand CC = Node->getOperand(4); + SDValue CC = Node->getOperand(4); LegalizeSetCCOperands(Tmp1, Tmp2, CC); @@ -2874,7 +2874,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::VSETCC: { Tmp1 = LegalizeOp(Node->getOperand(0)); // LHS Tmp2 = LegalizeOp(Node->getOperand(1)); // RHS - SDOperand CC = Node->getOperand(2); + SDValue CC = Node->getOperand(2); Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, CC); @@ -2893,7 +2893,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case ISD::SHL_PARTS: case ISD::SRA_PARTS: case ISD::SRL_PARTS: { - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; bool Changed = false; for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { Ops.push_back(LegalizeOp(Node->getOperand(i))); @@ -2909,10 +2909,10 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { case TargetLowering::Custom: Tmp1 = TLI.LowerOperation(Result, DAG); if (Tmp1.Val) { - SDOperand Tmp2, RetVal(0, 0); + SDValue Tmp2, RetVal(0, 0); for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) { Tmp2 = LegalizeOp(Tmp1.getValue(i)); - AddLegalizedOperand(SDOperand(Node, i), Tmp2); + AddLegalizedOperand(SDValue(Node, i), Tmp2); if (i == Op.ResNo) RetVal = Tmp2; } @@ -2925,7 +2925,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Since these produce multiple values, make sure to remember that we // legalized all of them. 
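Side note: the hunk at the top of this chunk expands a store of an f64 constant into two i32 stores, taking the low 32 bits of the value's bit pattern and the bits shifted right by 32, and swapping the pair on big-endian targets. A host-side sketch of that bit splitting:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double D = 1.5;
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof(Bits));          // the constant's bit pattern
  uint32_t Lo = uint32_t(Bits);                  // trunc(IntVal, 32)
  uint32_t Hi = uint32_t(Bits >> 32);            // trunc(lshr(IntVal, 32), 32)
  // A big-endian target stores Hi first (the std::swap(Lo, Hi) in the hunk).
  std::printf("lo=0x%08x hi=0x%08x\n", Lo, Hi);  // lo=0x00000000 hi=0x3ff80000
}
```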
for (unsigned i = 0, e = Node->getNumValues(); i != e; ++i) - AddLegalizedOperand(SDOperand(Node, i), Result.getValue(i)); + AddLegalizedOperand(SDValue(Node, i), Result.getValue(i)); return Result.getValue(Op.ResNo); } @@ -2993,28 +2993,28 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { OpToUse = ISD::UMUL_LOHI; } if (OpToUse) { - Result = SDOperand(DAG.getNode(OpToUse, VTs, Tmp1, Tmp2).Val, 0); + Result = SDValue(DAG.getNode(OpToUse, VTs, Tmp1, Tmp2).Val, 0); break; } } if (Node->getOpcode() == ISD::MULHS && TLI.isOperationLegal(ISD::SMUL_LOHI, VT)) { - Result = SDOperand(DAG.getNode(ISD::SMUL_LOHI, VTs, Tmp1, Tmp2).Val, 1); + Result = SDValue(DAG.getNode(ISD::SMUL_LOHI, VTs, Tmp1, Tmp2).Val, 1); break; } if (Node->getOpcode() == ISD::MULHU && TLI.isOperationLegal(ISD::UMUL_LOHI, VT)) { - Result = SDOperand(DAG.getNode(ISD::UMUL_LOHI, VTs, Tmp1, Tmp2).Val, 1); + Result = SDValue(DAG.getNode(ISD::UMUL_LOHI, VTs, Tmp1, Tmp2).Val, 1); break; } if (Node->getOpcode() == ISD::SDIV && TLI.isOperationLegal(ISD::SDIVREM, VT)) { - Result = SDOperand(DAG.getNode(ISD::SDIVREM, VTs, Tmp1, Tmp2).Val, 0); + Result = SDValue(DAG.getNode(ISD::SDIVREM, VTs, Tmp1, Tmp2).Val, 0); break; } if (Node->getOpcode() == ISD::UDIV && TLI.isOperationLegal(ISD::UDIVREM, VT)) { - Result = SDOperand(DAG.getNode(ISD::UDIVREM, VTs, Tmp1, Tmp2).Val, 0); + Result = SDValue(DAG.getNode(ISD::UDIVREM, VTs, Tmp1, Tmp2).Val, 0); break; } @@ -3037,7 +3037,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { default: break; } if (LC != RTLIB::UNKNOWN_LIBCALL) { - SDOperand Dummy; + SDValue Dummy; Result = ExpandLibCall(LC, Node, isSigned, Dummy); break; } @@ -3116,11 +3116,11 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Get the sign bit of the RHS. MVT IVT = Tmp2.getValueType() == MVT::f32 ? MVT::i32 : MVT::i64; - SDOperand SignBit = DAG.getNode(ISD::BIT_CONVERT, IVT, Tmp2); + SDValue SignBit = DAG.getNode(ISD::BIT_CONVERT, IVT, Tmp2); SignBit = DAG.getSetCC(TLI.getSetCCResultType(SignBit), SignBit, DAG.getConstant(0, IVT), ISD::SETLT); // Get the absolute value of the result. - SDOperand AbsVal = DAG.getNode(ISD::FABS, Tmp1.getValueType(), Tmp1); + SDValue AbsVal = DAG.getNode(ISD::FABS, Tmp1.getValueType(), Tmp1); // Select between the nabs and abs value based on the sign bit of // the input. Result = DAG.getNode(ISD::SELECT, AbsVal.getValueType(), SignBit, @@ -3149,8 +3149,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2); // Since this produces two values, make sure to remember that we legalized // both of them. - AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result; case ISD::ADDE: @@ -3161,8 +3161,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, Tmp3); // Since this produces two values, make sure to remember that we legalized // both of them. 
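Side note: the FCOPYSIGN expansion above bit-converts the sign-source operand to an integer, tests it against zero with SETLT to read the sign, takes FABS of the magnitude operand, and selects between the negated and non-negated absolute value. The same computation in scalar C++ for the f64/i64 case (function name is illustrative):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// copysign(x, y) the way the hunk lowers it: read y's sign via an integer
// bit-cast and a signed "< 0" test, then pick -|x| or |x|.
double CopySignExpanded(double X, double Y) {
  int64_t SignBits;
  std::memcpy(&SignBits, &Y, sizeof(SignBits));  // BIT_CONVERT f64 -> i64
  bool SignSet = SignBits < 0;                   // SETCC(..., 0, SETLT)
  double AbsVal = std::fabs(X);                  // FABS
  return SignSet ? -AbsVal : AbsVal;             // SELECT between nabs and abs
}

int main() {
  std::printf("%g %g\n", CopySignExpanded(3.0, -0.0),
              CopySignExpanded(-3.0, 2.0));      // -3 3
}
```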
- AddLegalizedOperand(SDOperand(Node, 0), Result.getValue(0)); - AddLegalizedOperand(SDOperand(Node, 1), Result.getValue(1)); + AddLegalizedOperand(SDValue(Node, 0), Result.getValue(0)); + AddLegalizedOperand(SDValue(Node, 1), Result.getValue(1)); return Result; case ISD::BUILD_PAIR: { @@ -3217,12 +3217,12 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { SDVTList VTs = DAG.getVTList(VT, VT); if (Node->getOpcode() == ISD::SREM && TLI.isOperationLegal(ISD::SDIVREM, VT)) { - Result = SDOperand(DAG.getNode(ISD::SDIVREM, VTs, Tmp1, Tmp2).Val, 1); + Result = SDValue(DAG.getNode(ISD::SDIVREM, VTs, Tmp1, Tmp2).Val, 1); break; } if (Node->getOpcode() == ISD::UREM && TLI.isOperationLegal(ISD::UDIVREM, VT)) { - Result = SDOperand(DAG.getNode(ISD::UDIVREM, VTs, Tmp1, Tmp2).Val, 1); + Result = SDValue(DAG.getNode(ISD::UDIVREM, VTs, Tmp1, Tmp2).Val, 1); break; } @@ -3240,7 +3240,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { "Cannot expand this binary operator!"); RTLIB::Libcall LC = Node->getOpcode() == ISD::UREM ? RTLIB::UREM_I32 : RTLIB::SREM_I32; - SDOperand Dummy; + SDValue Dummy; Result = ExpandLibCall(LC, Node, isSigned, Dummy); } } else { @@ -3252,7 +3252,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // Floating point mod -> fmod libcall. RTLIB::Libcall LC = GetFPLibCall(VT, RTLIB::REM_F32, RTLIB::REM_F64, RTLIB::REM_F80, RTLIB::REM_PPCF128); - SDOperand Dummy; + SDValue Dummy; Result = ExpandLibCall(LC, Node, false/*sign irrelevant*/, Dummy); } } @@ -3285,7 +3285,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case TargetLowering::Expand: { const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); - SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0); + SDValue VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0); // Increment the pointer, VAList, to the next vaarg Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, DAG.getConstant(VT.getSizeInBits()/8, @@ -3301,8 +3301,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { } // Since VAARG produces two values, make sure to remember that we // legalized both of them. - AddLegalizedOperand(SDOperand(Node, 0), Result); - AddLegalizedOperand(SDOperand(Node, 1), Tmp1); + AddLegalizedOperand(SDValue(Node, 0), Result); + AddLegalizedOperand(SDValue(Node, 1), Tmp1); return Op.ResNo ? Tmp1 : Result; } @@ -3537,7 +3537,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; default: assert(0 && "Unreachable!"); } - SDOperand Dummy; + SDValue Dummy; Result = ExpandLibCall(LC, Node, false/*sign irrelevant*/, Dummy); break; } @@ -3557,7 +3557,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // We always lower FPOWI into a libcall. No target support for it yet. RTLIB::Libcall LC = GetFPLibCall(VT, RTLIB::POWI_F32, RTLIB::POWI_F64, RTLIB::POWI_F80, RTLIB::POWI_PPCF128); - SDOperand Dummy; + SDValue Dummy; Result = ExpandLibCall(LC, Node, false/*sign irrelevant*/, Dummy); break; } @@ -3701,7 +3701,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; case TargetLowering::Expand: if (Node->getOpcode() == ISD::FP_TO_UINT) { - SDOperand True, False; + SDValue True, False; MVT VT = Node->getOperand(0).getValueType(); MVT NVT = Node->getValueType(0); const uint64_t zero[] = {0, 0}; @@ -3758,7 +3758,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { RTLIB::Libcall LC = (Node->getOpcode() == ISD::FP_TO_SINT) ? 
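Side note: the VAARG expansion above loads the current pointer out of the va_list slot, advances it by the argument size (VT.getSizeInBits()/8), stores the advanced pointer back, and reads the argument from the old pointer. A scalar model of that pointer bookkeeping, with a plain char* cursor standing in for the target's va_list:

```cpp
#include <cstdio>
#include <cstring>

// Model of the expanded VAARG: *ListPtr is the cursor kept in memory.
// Load it, bump it by the argument size, write it back, then load the value.
int VAArgInt(char **ListPtr) {
  char *VAList = *ListPtr;              // load the current pointer
  *ListPtr = VAList + sizeof(int);      // increment and store it back
  int Value;
  std::memcpy(&Value, VAList, sizeof(Value));
  return Value;                         // value loaded through the old pointer
}

int main() {
  int Args[2] = {7, 42};
  char *Cursor = reinterpret_cast<char *>(Args);
  std::printf("%d %d\n", VAArgInt(&Cursor), VAArgInt(&Cursor));  // 7 42
}
```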
RTLIB::getFPTOSINT(OVT, VT) : RTLIB::getFPTOUINT(OVT, VT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpectd fp-to-int conversion!"); - SDOperand Dummy; + SDValue Dummy; Result = ExpandLibCall(LC, Node, false/*sign irrelevant*/, Dummy); break; } @@ -3797,7 +3797,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { MVT SrcVT = Op.getOperand(0).getValueType(); if (TLI.getConvertAction(SrcVT, DstVT) == TargetLowering::Expand) { if (SrcVT == MVT::ppcf128) { - SDOperand Lo; + SDValue Lo; ExpandOp(Node->getOperand(0), Lo, Result); // Round it the rest of the way (e.g. to f32) if needed. if (DstVT!=MVT::f64) @@ -3878,7 +3878,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // SAR. However, it is doubtful that any exist. unsigned BitsDiff = Node->getValueType(0).getSizeInBits() - ExtraVT.getSizeInBits(); - SDOperand ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy()); + SDValue ShiftCst = DAG.getConstant(BitsDiff, TLI.getShiftAmountTy()); Result = DAG.getNode(ISD::SHL, Node->getValueType(0), Node->getOperand(0), ShiftCst); Result = DAG.getNode(ISD::SRA, Node->getValueType(0), @@ -3900,7 +3900,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { break; } case ISD::TRAMPOLINE: { - SDOperand Ops[6]; + SDValue Ops[6]; for (unsigned i = 0; i != 6; ++i) Ops[i] = LegalizeOp(Node->getOperand(i)); Result = DAG.UpdateNodeOperands(Result, Ops, 6); @@ -3912,8 +3912,8 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // legalized both of them. Tmp1 = LegalizeOp(Result.getValue(1)); Result = LegalizeOp(Result); - AddLegalizedOperand(SDOperand(Node, 0), Result); - AddLegalizedOperand(SDOperand(Node, 1), Tmp1); + AddLegalizedOperand(SDValue(Node, 0), Result); + AddLegalizedOperand(SDValue(Node, 1), Tmp1); return Op.ResNo ? Tmp1 : Result; } case ISD::FLT_ROUNDS_: { @@ -3947,7 +3947,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { // If this operation is not supported, lower it to 'abort()' call Tmp1 = LegalizeOp(Node->getOperand(0)); TargetLowering::ArgListTy Args; - std::pair<SDOperand,SDOperand> CallResult = + std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(Tmp1, Type::VoidTy, false, false, false, CallingConv::C, false, DAG.getExternalSymbol("abort", TLI.getPointerTy()), @@ -3976,7 +3976,7 @@ SDOperand SelectionDAGLegalize::LegalizeOp(SDOperand Op) { /// promote it to compute the value into a larger type. The produced value will /// have the correct bits for the low portion of the register, but no guarantee /// is made about the top bits: it may be zero, sign-extended, or garbage. 
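Side note: the SIGN_EXTEND_INREG expansion above shifts the value left by BitsDiff = full width minus ExtraVT width and then arithmetic-shifts it right by the same amount, which replicates the sign bit of the narrow field across the high bits. For example, sign-extending the low 8 bits held in an i32:

```cpp
#include <cstdint>
#include <cstdio>

// SIGN_EXTEND_INREG i32 from i8: SHL then SRA by BitsDiff = 32 - 8 = 24.
int32_t SignExtendInReg8(int32_t X) {
  const int BitsDiff = 32 - 8;
  // Do the left shift in an unsigned type to avoid signed-overflow UB, then
  // arithmetic-shift right in the signed type (that is what ISD::SRA does).
  return int32_t(uint32_t(X) << BitsDiff) >> BitsDiff;
}

int main() {
  std::printf("%d %d\n", SignExtendInReg8(0x000000FF),
              SignExtendInReg8(0x0000007F));     // -1 127
}
```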
-SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { +SDValue SelectionDAGLegalize::PromoteOp(SDValue Op) { MVT VT = Op.getValueType(); MVT NVT = TLI.getTypeToTransformTo(VT); assert(getTypeAction(VT) == Promote && @@ -3984,11 +3984,11 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { assert(NVT.bitsGT(VT) && NVT.isInteger() == VT.isInteger() && "Cannot promote to smaller type!"); - SDOperand Tmp1, Tmp2, Tmp3; - SDOperand Result; + SDValue Tmp1, Tmp2, Tmp3; + SDValue Result; SDNode *Node = Op.Val; - DenseMap<SDOperand, SDOperand>::iterator I = PromotedNodes.find(Op); + DenseMap<SDValue, SDValue>::iterator I = PromotedNodes.find(Op); if (I != PromotedNodes.end()) return I->second; switch (Node->getOpcode()) { @@ -4331,7 +4331,7 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { Result = TLI.LowerOperation(Tmp3, DAG); } else { const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); - SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0); + SDValue VAList = DAG.getLoad(TLI.getPointerTy(), Tmp1, Tmp2, V, 0); // Increment the pointer, VAList, to the next vaarg Tmp3 = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, DAG.getConstant(VT.getSizeInBits()/8, @@ -4436,12 +4436,12 @@ SDOperand SelectionDAGLegalize::PromoteOp(SDOperand Op) { /// a legal EXTRACT_VECTOR_ELT operation, scalar code, or memory traffic, /// based on the vector type. The return type of this matches the element type /// of the vector, which may not be legal for the target. -SDOperand SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDOperand Op) { +SDValue SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDValue Op) { // We know that operand #0 is the Vec vector. If the index is a constant // or if the invec is a supported hardware type, we can use it. Otherwise, // lower to a store then an indexed load. - SDOperand Vec = Op.getOperand(0); - SDOperand Idx = Op.getOperand(1); + SDValue Vec = Op.getOperand(0); + SDValue Idx = Op.getOperand(1); MVT TVT = Vec.getValueType(); unsigned NumElems = TVT.getVectorNumElements(); @@ -4451,7 +4451,7 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDOperand Op) { case TargetLowering::Custom: { Vec = LegalizeOp(Vec); Op = DAG.UpdateNodeOperands(Op, Vec, Idx); - SDOperand Tmp3 = TLI.LowerOperation(Op, DAG); + SDValue Tmp3 = TLI.LowerOperation(Op, DAG); if (Tmp3.Val) return Tmp3; break; @@ -4473,7 +4473,7 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDOperand Op) { } else if (!TLI.isTypeLegal(TVT) && isa<ConstantSDNode>(Idx)) { unsigned NumLoElts = 1 << Log2_32(NumElems-1); ConstantSDNode *CIdx = cast<ConstantSDNode>(Idx); - SDOperand Lo, Hi; + SDValue Lo, Hi; SplitVectorOp(Vec, Lo, Hi); if (CIdx->getValue() < NumLoElts) { Vec = Lo; @@ -4489,8 +4489,8 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDOperand Op) { } else { // Store the value to a temporary stack slot, then LOAD the scalar // element back out. - SDOperand StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); - SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); + SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType()); + SDValue Ch = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); // Add the offset to the index. unsigned EltSize = Op.getValueType().getSizeInBits()/8; @@ -4511,11 +4511,11 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_VECTOR_ELT(SDOperand Op) { /// ExpandEXTRACT_SUBVECTOR - Expand a EXTRACT_SUBVECTOR operation. 
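Side note: when the index is not a constant and the vector type cannot be split or used directly, ExpandEXTRACT_VECTOR_ELT above falls back to storing the whole vector to a stack temporary and loading the requested element back at byte offset EltSize * Idx. A scalar sketch of that store-then-indexed-load fallback:

```cpp
#include <cstdio>
#include <cstring>

// Fallback model: spill the vector into a buffer (the stack temporary), then
// load element Idx back at offset Idx * sizeof(element).
float ExtractEltViaStack(const float Vec[4], unsigned Idx) {
  unsigned char Slot[sizeof(float) * 4];
  std::memcpy(Slot, Vec, sizeof(Slot));                        // store the vector
  float Elt;
  std::memcpy(&Elt, Slot + Idx * sizeof(float), sizeof(Elt));  // indexed load
  return Elt;
}

int main() {
  float V[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  std::printf("%g\n", ExtractEltViaStack(V, 2));  // 3
}
```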
For now /// we assume the operation can be split if it is not already legal. -SDOperand SelectionDAGLegalize::ExpandEXTRACT_SUBVECTOR(SDOperand Op) { +SDValue SelectionDAGLegalize::ExpandEXTRACT_SUBVECTOR(SDValue Op) { // We know that operand #0 is the Vec vector. For now we assume the index // is a constant and that the extracted result is a supported hardware type. - SDOperand Vec = Op.getOperand(0); - SDOperand Idx = LegalizeOp(Op.getOperand(1)); + SDValue Vec = Op.getOperand(0); + SDValue Idx = LegalizeOp(Op.getOperand(1)); unsigned NumElems = Vec.getValueType().getVectorNumElements(); @@ -4525,7 +4525,7 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_SUBVECTOR(SDOperand Op) { } ConstantSDNode *CIdx = cast<ConstantSDNode>(Idx); - SDOperand Lo, Hi; + SDValue Lo, Hi; SplitVectorOp(Vec, Lo, Hi); if (CIdx->getValue() < NumElems/2) { Vec = Lo; @@ -4544,11 +4544,11 @@ SDOperand SelectionDAGLegalize::ExpandEXTRACT_SUBVECTOR(SDOperand Op) { /// or promoting the arguments. In the case where LHS and RHS must be expanded, /// there may be no choice but to create a new SetCC node to represent the /// legalized value of setcc lhs, rhs. In this case, the value is returned in -/// LHS, and the SDOperand returned in RHS has a nil SDNode value. -void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, - SDOperand &RHS, - SDOperand &CC) { - SDOperand Tmp1, Tmp2, Tmp3, Result; +/// LHS, and the SDValue returned in RHS has a nil SDNode value. +void SelectionDAGLegalize::LegalizeSetCCOperands(SDValue &LHS, + SDValue &RHS, + SDValue &CC) { + SDValue Tmp1, Tmp2, Tmp3, Result; switch (getTypeAction(LHS.getValueType())) { case Legal: @@ -4656,8 +4656,8 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, } } - SDOperand Dummy; - SDOperand Ops[2] = { LHS, RHS }; + SDValue Dummy; + SDValue Ops[2] = { LHS, RHS }; Tmp1 = ExpandLibCall(LC1, DAG.getMergeValues(Ops, 2).Val, false /*sign irrelevant*/, Dummy); Tmp2 = DAG.getConstant(0, MVT::i32); @@ -4670,14 +4670,14 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, Tmp2 = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(LHS), LHS, Tmp2, DAG.getCondCode(TLI.getCmpLibcallCC(LC2))); Tmp1 = DAG.getNode(ISD::OR, Tmp1.getValueType(), Tmp1, Tmp2); - Tmp2 = SDOperand(); + Tmp2 = SDValue(); } LHS = LegalizeOp(Tmp1); RHS = Tmp2; return; } - SDOperand LHSLo, LHSHi, RHSLo, RHSHi; + SDValue LHSLo, LHSHi, RHSLo, RHSHi; ExpandOp(LHS, LHSLo, LHSHi); ExpandOp(RHS, RHSLo, RHSHi); ISD::CondCode CCCode = cast<CondCodeSDNode>(CC)->get(); @@ -4695,7 +4695,7 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, CCCode); Tmp1 = DAG.getNode(ISD::AND, Tmp1.getValueType(), Tmp1, Tmp2); Tmp1 = DAG.getNode(ISD::OR, Tmp1.getValueType(), Tmp1, Tmp3); - Tmp2 = SDOperand(); + Tmp2 = SDValue(); break; } @@ -4773,7 +4773,7 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, // For LE / GE, if high part is known false, ignore the low part. // For LT / GT, if high part is known true, ignore the low part. 
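Side note: when both setcc operands must themselves be expanded, LegalizeSetCCOperands above compares the two halves separately: equality of the wide value is the AND of the two half equalities, and (as in the partially elided branches) an ordering test is decided by the high halves unless they are equal, in which case the low halves decide. A sketch for a 64-bit unsigned comparison built from 32-bit halves (function names are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// SETEQ on an expanded value: (LHSLo == RHSLo) & (LHSHi == RHSHi).
bool EqParts(uint32_t LHSLo, uint32_t LHSHi, uint32_t RHSLo, uint32_t RHSHi) {
  return (LHSLo == RHSLo) & (LHSHi == RHSHi);
}

// SETULT on an expanded value: the high halves decide unless they are equal,
// in which case the low halves decide.
bool ULtParts(uint32_t LHSLo, uint32_t LHSHi, uint32_t RHSLo, uint32_t RHSHi) {
  if (LHSHi != RHSHi)
    return LHSHi < RHSHi;
  return LHSLo < RHSLo;
}

int main() {
  std::printf("%d %d\n", EqParts(1, 2, 1, 2), ULtParts(5, 1, 0, 2));  // 1 1
}
```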
Tmp1 = Tmp2; - Tmp2 = SDOperand(); + Tmp2 = SDValue(); } else { Result = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, ISD::SETEQ, false, DagCombineInfo); @@ -4783,7 +4783,7 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, Result = LegalizeOp(DAG.getNode(ISD::SELECT, Tmp1.getValueType(), Result, Tmp1, Tmp2)); Tmp1 = Result; - Tmp2 = SDOperand(); + Tmp2 = SDValue(); } } } @@ -4796,13 +4796,13 @@ void SelectionDAGLegalize::LegalizeSetCCOperands(SDOperand &LHS, /// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does /// a load from the stack slot to DestVT, extending it if needed. /// The resultant code need not be legal. -SDOperand SelectionDAGLegalize::EmitStackConvert(SDOperand SrcOp, - MVT SlotVT, - MVT DestVT) { +SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, + MVT SlotVT, + MVT DestVT) { // Create the stack frame object. unsigned SrcAlign = TLI.getTargetData()->getPrefTypeAlignment( SrcOp.getValueType().getTypeForMVT()); - SDOperand FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); + SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr); int SPFI = StackPtrFI->getIndex(); @@ -4815,7 +4815,7 @@ SDOperand SelectionDAGLegalize::EmitStackConvert(SDOperand SrcOp, // Emit a store to the stack slot. Use a truncstore if the input value is // later than DestVT. - SDOperand Store; + SDValue Store; if (SrcSize > SlotSize) Store = DAG.getTruncStore(DAG.getEntryNode(), SrcOp, FIPtr, @@ -4837,15 +4837,15 @@ SDOperand SelectionDAGLegalize::EmitStackConvert(SDOperand SrcOp, false, DestAlign); } -SDOperand SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { +SDValue SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { // Create a vector sized/aligned stack slot, store the value to element #0, // then load the whole vector back out. - SDOperand StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); + SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0)); FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr); int SPFI = StackPtrFI->getIndex(); - SDOperand Ch = DAG.getStore(DAG.getEntryNode(), Node->getOperand(0), StackPtr, + SDValue Ch = DAG.getStore(DAG.getEntryNode(), Node->getOperand(0), StackPtr, PseudoSourceValue::getFixedStack(SPFI), 0); return DAG.getLoad(Node->getValueType(0), Ch, StackPtr, PseudoSourceValue::getFixedStack(SPFI), 0); @@ -4854,17 +4854,17 @@ SDOperand SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode *Node) { /// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't /// support the operation, but do support the resultant vector type. -SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { +SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { // If the only non-undef value is the low element, turn this into a // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X. unsigned NumElems = Node->getNumOperands(); bool isOnlyLowElement = true; - SDOperand SplatValue = Node->getOperand(0); + SDValue SplatValue = Node->getOperand(0); - // FIXME: it would be far nicer to change this into map<SDOperand,uint64_t> + // FIXME: it would be far nicer to change this into map<SDValue,uint64_t> // and use a bitmask instead of a list of elements. 
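Side note: EmitStackConvert, in the hunk above, creates a stack slot of SlotVT, stores SrcOp into it (truncating when the source is wider than the slot), and loads the result back as DestVT (extending when the destination is wider). The common same-size case is simply a bit-for-bit conversion through memory; a host-side sketch for f32 reinterpreted as its i32 bit pattern:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Same-size EmitStackConvert: store the value to a slot, read it back as the
// destination type.  Here: f32 -> i32 bit pattern.
uint32_t BitConvertViaStack(float F) {
  unsigned char Slot[sizeof(float)];   // the stack temporary
  std::memcpy(Slot, &F, sizeof(F));    // store
  uint32_t I;
  std::memcpy(&I, Slot, sizeof(I));    // load as the destination type
  return I;
}

int main() {
  std::printf("0x%08x\n", BitConvertViaStack(1.0f));  // 0x3f800000
}
```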
- std::map<SDOperand, std::vector<unsigned> > Values; + std::map<SDValue, std::vector<unsigned> > Values; Values[SplatValue].push_back(0); bool isConstant = true; if (!isa<ConstantFPSDNode>(SplatValue) && !isa<ConstantSDNode>(SplatValue) && @@ -4872,12 +4872,12 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { isConstant = false; for (unsigned i = 1; i < NumElems; ++i) { - SDOperand V = Node->getOperand(i); + SDValue V = Node->getOperand(i); Values[V].push_back(i); if (V.getOpcode() != ISD::UNDEF) isOnlyLowElement = false; if (SplatValue != V) - SplatValue = SDOperand(0,0); + SplatValue = SDValue(0,0); // If this isn't a constant element or an undef, we can't use a constant // pool load. @@ -4914,7 +4914,7 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { } } Constant *CP = ConstantVector::get(CV); - SDOperand CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); + SDValue CPIdx = DAG.getConstantPool(CP, TLI.getPointerTy()); return DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0); } @@ -4922,15 +4922,15 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { if (SplatValue.Val) { // Splat of one value? // Build the shuffle constant vector: <0, 0, 0, 0> MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); - SDOperand Zero = DAG.getConstant(0, MaskVT.getVectorElementType()); - std::vector<SDOperand> ZeroVec(NumElems, Zero); - SDOperand SplatMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue Zero = DAG.getConstant(0, MaskVT.getVectorElementType()); + std::vector<SDValue> ZeroVec(NumElems, Zero); + SDValue SplatMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &ZeroVec[0], ZeroVec.size()); // If the target supports VECTOR_SHUFFLE and this shuffle mask, use it. if (isShuffleLegal(Node->getValueType(0), SplatMask)) { // Get the splatted value into the low element of a vector register. - SDOperand LowValVec = + SDValue LowValVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, Node->getValueType(0), SplatValue); // Return shuffle(LowValVec, undef, <0,0,0,0>) @@ -4944,9 +4944,9 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { // vector shuffle. if (Values.size() == 2) { // Get the two values in deterministic order. - SDOperand Val1 = Node->getOperand(1); - SDOperand Val2; - std::map<SDOperand, std::vector<unsigned> >::iterator MI = Values.begin(); + SDValue Val1 = Node->getOperand(1); + SDValue Val2; + std::map<SDValue, std::vector<unsigned> >::iterator MI = Values.begin(); if (MI->first != Val1) Val2 = MI->first; else @@ -4960,7 +4960,7 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { // Build the shuffle constant vector: e.g. <0, 4, 0, 4> MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT MaskEltVT = MaskVT.getVectorElementType(); - std::vector<SDOperand> MaskVec(NumElems); + std::vector<SDValue> MaskVec(NumElems); // Set elements of the shuffle mask for Val1. std::vector<unsigned> &Val1Elts = Values[Val1]; @@ -4975,7 +4975,7 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { else MaskVec[Val2Elts[i]] = DAG.getNode(ISD::UNDEF, MaskEltVT); - SDOperand ShuffleMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue ShuffleMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); // If the target supports SCALAR_TO_VECTOR and this shuffle mask, use it. 
@@ -4983,7 +4983,7 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { isShuffleLegal(Node->getValueType(0), ShuffleMask)) { Val1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, Node->getValueType(0), Val1); Val2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, Node->getValueType(0), Val2); - SDOperand Ops[] = { Val1, Val2, ShuffleMask }; + SDValue Ops[] = { Val1, Val2, ShuffleMask }; // Return shuffle(LoValVec, HiValVec, <0,1,0,1>) return DAG.getNode(ISD::VECTOR_SHUFFLE, Node->getValueType(0), Ops, 3); @@ -4995,10 +4995,10 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { // the result as a vector. MVT VT = Node->getValueType(0); // Create the stack frame object. - SDOperand FIPtr = DAG.CreateStackTemporary(VT); + SDValue FIPtr = DAG.CreateStackTemporary(VT); // Emit a store of each element to the stack slot. - SmallVector<SDOperand, 8> Stores; + SmallVector<SDValue, 8> Stores; unsigned TypeByteSize = Node->getOperand(0).getValueType().getSizeInBits()/8; // Store (in the right endianness) the elements to memory. for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) { @@ -5007,14 +5007,14 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { unsigned Offset = TypeByteSize*i; - SDOperand Idx = DAG.getConstant(Offset, FIPtr.getValueType()); + SDValue Idx = DAG.getConstant(Offset, FIPtr.getValueType()); Idx = DAG.getNode(ISD::ADD, FIPtr.getValueType(), FIPtr, Idx); Stores.push_back(DAG.getStore(DAG.getEntryNode(), Node->getOperand(i), Idx, NULL, 0)); } - SDOperand StoreChain; + SDValue StoreChain; if (!Stores.empty()) // Not all undef elements? StoreChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0], Stores.size()); @@ -5026,13 +5026,13 @@ SDOperand SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) { } void SelectionDAGLegalize::ExpandShiftParts(unsigned NodeOp, - SDOperand Op, SDOperand Amt, - SDOperand &Lo, SDOperand &Hi) { + SDValue Op, SDValue Amt, + SDValue &Lo, SDValue &Hi) { // Expand the subcomponents. - SDOperand LHSL, LHSH; + SDValue LHSL, LHSH; ExpandOp(Op, LHSL, LHSH); - SDOperand Ops[] = { LHSL, LHSH, Amt }; + SDValue Ops[] = { LHSL, LHSH, Amt }; MVT VT = LHSL.getValueType(); Lo = DAG.getNode(NodeOp, DAG.getNodeValueTypes(VT, VT), 2, Ops, 3); Hi = Lo.getValue(1); @@ -5043,13 +5043,13 @@ void SelectionDAGLegalize::ExpandShiftParts(unsigned NodeOp, /// smaller elements. If we can't find a way that is more efficient than a /// libcall on this target, return false. Otherwise, return true with the /// low-parts expanded into Lo and Hi. 
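Side note: when a BUILD_VECTOR is neither a constant-pool load, a splat, nor a two-value shuffle, the fallback above stores each element to a stack temporary at byte offset TypeByteSize * i, token-factors the stores together, and loads the whole vector back. A scalar sketch of that per-element store and vector reload:

```cpp
#include <cstdio>
#include <cstring>

// Fallback BUILD_VECTOR: write each element at offset i * sizeof(element)
// into a buffer (the stack temporary), then read the buffer back whole.
void BuildVectorViaStack(const int Elts[4], int OutVec[4]) {
  unsigned char Slot[sizeof(int) * 4];
  for (unsigned i = 0; i != 4; ++i)
    std::memcpy(Slot + i * sizeof(int), &Elts[i], sizeof(int));  // per-element store
  std::memcpy(OutVec, Slot, sizeof(Slot));                       // vector load
}

int main() {
  int In[4] = {1, 2, 3, 4}, Out[4];
  BuildVectorViaStack(In, Out);
  std::printf("%d %d %d %d\n", Out[0], Out[1], Out[2], Out[3]);  // 1 2 3 4
}
```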
-bool SelectionDAGLegalize::ExpandShift(unsigned Opc, SDOperand Op,SDOperand Amt, - SDOperand &Lo, SDOperand &Hi) { +bool SelectionDAGLegalize::ExpandShift(unsigned Opc, SDValue Op,SDValue Amt, + SDValue &Lo, SDValue &Hi) { assert((Opc == ISD::SHL || Opc == ISD::SRA || Opc == ISD::SRL) && "This is not a shift!"); MVT NVT = TLI.getTypeToTransformTo(Op.getValueType()); - SDOperand ShAmt = LegalizeOp(Amt); + SDValue ShAmt = LegalizeOp(Amt); MVT ShTy = ShAmt.getValueType(); unsigned ShBits = ShTy.getSizeInBits(); unsigned VTBits = Op.getValueType().getSizeInBits(); @@ -5059,7 +5059,7 @@ bool SelectionDAGLegalize::ExpandShift(unsigned Opc, SDOperand Op,SDOperand Amt, if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Amt.Val)) { unsigned Cst = CN->getValue(); // Expand the incoming operand to be shifted, so that we have its parts - SDOperand InL, InH; + SDValue InL, InH; ExpandOp(Op, InL, InH); switch(Opc) { case ISD::SHL: @@ -5133,7 +5133,7 @@ bool SelectionDAGLegalize::ExpandShift(unsigned Opc, SDOperand Op,SDOperand Amt, DAG.getConstant(~Mask, Amt.getValueType())); // Expand the incoming operand to be shifted, so that we have its parts - SDOperand InL, InH; + SDValue InL, InH; ExpandOp(Op, InL, InH); switch(Opc) { case ISD::SHL: @@ -5156,12 +5156,12 @@ bool SelectionDAGLegalize::ExpandShift(unsigned Opc, SDOperand Op,SDOperand Amt, // do this as a couple of simple shifts. if ((KnownZero & Mask) == Mask) { // Compute 32-amt. - SDOperand Amt2 = DAG.getNode(ISD::SUB, Amt.getValueType(), + SDValue Amt2 = DAG.getNode(ISD::SUB, Amt.getValueType(), DAG.getConstant(NVTBits, Amt.getValueType()), Amt); // Expand the incoming operand to be shifted, so that we have its parts - SDOperand InL, InH; + SDValue InL, InH; ExpandOp(Op, InL, InH); switch(Opc) { case ISD::SHL: @@ -5193,13 +5193,13 @@ bool SelectionDAGLegalize::ExpandShift(unsigned Opc, SDOperand Op,SDOperand Amt, // does not fit into a register, return the lo part and set the hi part to the // by-reg argument. If it does fit into a single register, return the result // and leave the Hi part unset. -SDOperand SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, - bool isSigned, SDOperand &Hi) { +SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, + bool isSigned, SDValue &Hi) { assert(!IsLegalizingCall && "Cannot overlap legalization of calls!"); // The input chain to this libcall is the entry node of the function. // Legalizing the call will automatically add the previous call to the // dependence. - SDOperand InChain = DAG.getEntryNode(); + SDValue InChain = DAG.getEntryNode(); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; @@ -5211,12 +5211,12 @@ SDOperand SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, Entry.isZExt = !isSigned; Args.push_back(Entry); } - SDOperand Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), + SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), TLI.getPointerTy()); // Splice the libcall in wherever FindInputOutputChains tells us to. const Type *RetTy = Node->getValueType(0).getTypeForMVT(); - std::pair<SDOperand,SDOperand> CallInfo = + std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, CallingConv::C, false, Callee, Args, DAG); @@ -5224,7 +5224,7 @@ SDOperand SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that // was added by LowerCallTo (guaranteeing proper serialization of calls). 
LegalizeOp(CallInfo.second); - SDOperand Result; + SDValue Result; switch (getTypeAction(CallInfo.first.getValueType())) { default: assert(0 && "Unknown thing"); case Legal: @@ -5240,8 +5240,8 @@ SDOperand SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node, /// ExpandIntToFP - Expand a [US]INT_TO_FP operation. /// -SDOperand SelectionDAGLegalize:: -ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { +SDValue SelectionDAGLegalize:: +ExpandIntToFP(bool isSigned, MVT DestTy, SDValue Source) { MVT SourceVT = Source.getValueType(); bool ExpandSource = getTypeAction(SourceVT) == Expand; @@ -5250,9 +5250,9 @@ ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { // The integer value loaded will be incorrectly if the 'sign bit' of the // incoming integer is set. To handle this, we dynamically test to see if // it is set, and, if so, add a fudge factor. - SDOperand Hi; + SDValue Hi; if (ExpandSource) { - SDOperand Lo; + SDValue Lo; ExpandOp(Source, Lo, Hi); Source = DAG.getNode(ISD::BUILD_PAIR, SourceVT, Lo, Hi); } else { @@ -5262,21 +5262,21 @@ ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { // If this is unsigned, and not supported, first perform the conversion to // signed, then adjust the result if the sign bit is set. - SDOperand SignedConv = ExpandIntToFP(true, DestTy, Source); + SDValue SignedConv = ExpandIntToFP(true, DestTy, Source); - SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi, + SDValue SignSet = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi, DAG.getConstant(0, Hi.getValueType()), ISD::SETLT); - SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); - SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(), + SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); + SDValue CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(), SignSet, Four, Zero); uint64_t FF = 0x5f800000ULL; if (TLI.isLittleEndian()) FF <<= 32; static Constant *FudgeFactor = ConstantInt::get(Type::Int64Ty, FF); - SDOperand CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); + SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset); - SDOperand FudgeInReg; + SDValue FudgeInReg; if (DestTy == MVT::f32) FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0); @@ -5310,7 +5310,7 @@ ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { case TargetLowering::Expand: break; // This case is handled below. case TargetLowering::Custom: { - SDOperand NV = TLI.LowerOperation(DAG.getNode(ISD::SINT_TO_FP, DestTy, + SDValue NV = TLI.LowerOperation(DAG.getNode(ISD::SINT_TO_FP, DestTy, Source), DAG); if (NV.Val) return LegalizeOp(NV); @@ -5321,7 +5321,7 @@ ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { // Expand the source, then glue it back together for the call. We must expand // the source in case it is shared (this pass of legalize must traverse it). 
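Side note: for an unsigned source, ExpandIntToFP above first converts the value as if it were signed and then adds a correction when the sign bit was set; the 0x5f800000 pattern in the hunk is the f32 encoding of 2^64, and the SELECT of offset 0 or 4 picks either a zero word or that constant out of the constant pool. The final add is not visible in the diff, so the following shows the intended arithmetic for a 64-bit source rather than the exact node sequence:

```cpp
#include <cstdint>
#include <cstdio>

// Unsigned-to-FP via a signed conversion plus a fudge factor: interpreting the
// bits as signed is off by exactly 2^64 whenever the top bit is set.
double UIntToFP(uint64_t X) {
  double SignedConv = double(int64_t(X));       // SINT_TO_FP of the same bits
  double Fudge = 18446744073709551616.0;        // 2^64, the 0x5f800000 constant
  return (int64_t(X) < 0) ? SignedConv + Fudge  // sign bit set: add 2^64
                          : SignedConv;         // otherwise no adjustment
}

int main() {
  std::printf("%g\n", UIntToFP(0xFFFFFFFFFFFFFFFFULL));  // 1.84467e+19
}
```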
if (ExpandSource) { - SDOperand SrcLo, SrcHi; + SDValue SrcLo, SrcHi; ExpandOp(Source, SrcLo, SrcHi); Source = DAG.getNode(ISD::BUILD_PAIR, SourceVT, SrcLo, SrcHi); } @@ -5332,8 +5332,8 @@ ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unknown int value type"); Source = DAG.getNode(ISD::SINT_TO_FP, DestTy, Source); - SDOperand HiPart; - SDOperand Result = ExpandLibCall(LC, Source.Val, isSigned, HiPart); + SDValue HiPart; + SDValue Result = ExpandLibCall(LC, Source.Val, isSigned, HiPart); if (Result.getValueType() != DestTy && HiPart.Val) Result = DAG.getNode(ISD::BUILD_PAIR, DestTy, Result, HiPart); return Result; @@ -5343,50 +5343,50 @@ ExpandIntToFP(bool isSigned, MVT DestTy, SDOperand Source) { /// INT_TO_FP operation of the specified operand when the target requests that /// we expand it. At this point, we know that the result and operand types are /// legal for the target. -SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, - SDOperand Op0, - MVT DestVT) { +SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, + SDValue Op0, + MVT DestVT) { if (Op0.getValueType() == MVT::i32) { // simple 32-bit [signed|unsigned] integer to float/double expansion // Get the stack frame index of a 8 byte buffer. - SDOperand StackSlot = DAG.CreateStackTemporary(MVT::f64); + SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64); // word offset constant for Hi/Lo address computation - SDOperand WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); + SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy()); // set up Hi and Lo (into buffer) address based on endian - SDOperand Hi = StackSlot; - SDOperand Lo = DAG.getNode(ISD::ADD, TLI.getPointerTy(), StackSlot,WordOff); + SDValue Hi = StackSlot; + SDValue Lo = DAG.getNode(ISD::ADD, TLI.getPointerTy(), StackSlot,WordOff); if (TLI.isLittleEndian()) std::swap(Hi, Lo); // if signed map to unsigned space - SDOperand Op0Mapped; + SDValue Op0Mapped; if (isSigned) { // constant used to invert sign bit (signed to unsigned mapping) - SDOperand SignBit = DAG.getConstant(0x80000000u, MVT::i32); + SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32); Op0Mapped = DAG.getNode(ISD::XOR, MVT::i32, Op0, SignBit); } else { Op0Mapped = Op0; } // store the lo of the constructed double - based on integer input - SDOperand Store1 = DAG.getStore(DAG.getEntryNode(), + SDValue Store1 = DAG.getStore(DAG.getEntryNode(), Op0Mapped, Lo, NULL, 0); // initial hi portion of constructed double - SDOperand InitialHi = DAG.getConstant(0x43300000u, MVT::i32); + SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32); // store the hi of the constructed double - biased exponent - SDOperand Store2=DAG.getStore(Store1, InitialHi, Hi, NULL, 0); + SDValue Store2=DAG.getStore(Store1, InitialHi, Hi, NULL, 0); // load the constructed double - SDOperand Load = DAG.getLoad(MVT::f64, Store2, StackSlot, NULL, 0); + SDValue Load = DAG.getLoad(MVT::f64, Store2, StackSlot, NULL, 0); // FP constant to bias correct the final result - SDOperand Bias = DAG.getConstantFP(isSigned ? + SDValue Bias = DAG.getConstantFP(isSigned ? 
BitsToDouble(0x4330000080000000ULL) : BitsToDouble(0x4330000000000000ULL), MVT::f64); // subtract the bias - SDOperand Sub = DAG.getNode(ISD::FSUB, MVT::f64, Load, Bias); + SDValue Sub = DAG.getNode(ISD::FSUB, MVT::f64, Load, Bias); // final result - SDOperand Result; + SDValue Result; // handle final rounding if (DestVT == MVT::f64) { // do nothing @@ -5400,13 +5400,13 @@ SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, return Result; } assert(!isSigned && "Legalize cannot Expand SINT_TO_FP for i64 yet"); - SDOperand Tmp1 = DAG.getNode(ISD::SINT_TO_FP, DestVT, Op0); + SDValue Tmp1 = DAG.getNode(ISD::SINT_TO_FP, DestVT, Op0); - SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultType(Op0), Op0, + SDValue SignSet = DAG.getSetCC(TLI.getSetCCResultType(Op0), Op0, DAG.getConstant(0, Op0.getValueType()), ISD::SETLT); - SDOperand Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); - SDOperand CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(), + SDValue Zero = DAG.getIntPtrConstant(0), Four = DAG.getIntPtrConstant(4); + SDValue CstOffset = DAG.getNode(ISD::SELECT, Zero.getValueType(), SignSet, Four, Zero); // If the sign bit of the integer is set, the large number will be treated @@ -5423,9 +5423,9 @@ SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, if (TLI.isLittleEndian()) FF <<= 32; static Constant *FudgeFactor = ConstantInt::get(Type::Int64Ty, FF); - SDOperand CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); + SDValue CPIdx = DAG.getConstantPool(FudgeFactor, TLI.getPointerTy()); CPIdx = DAG.getNode(ISD::ADD, TLI.getPointerTy(), CPIdx, CstOffset); - SDOperand FudgeInReg; + SDValue FudgeInReg; if (DestVT == MVT::f32) FudgeInReg = DAG.getLoad(MVT::f32, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0); @@ -5445,9 +5445,9 @@ SDOperand SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, /// we promote it. At this point, we know that the result and operand types are /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP /// operation that takes a larger input. -SDOperand SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDOperand LegalOp, - MVT DestVT, - bool isSigned) { +SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp, + MVT DestVT, + bool isSigned) { // First step, figure out the appropriate *INT_TO_FP operation to use. MVT NewInTy = LegalOp.getValueType(); @@ -5500,9 +5500,9 @@ SDOperand SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDOperand LegalOp, /// we promote it. At this point, we know that the result and operand types are /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT /// operation that returns a larger result. -SDOperand SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDOperand LegalOp, - MVT DestVT, - bool isSigned) { +SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp, + MVT DestVT, + bool isSigned) { // First step, figure out the appropriate FP_TO*INT operation to use. MVT NewOutTy = DestVT; @@ -5544,13 +5544,13 @@ SDOperand SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDOperand LegalOp, // Okay, we found the operation and type to use. - SDOperand Operation = DAG.getNode(OpToUse, NewOutTy, LegalOp); + SDValue Operation = DAG.getNode(OpToUse, NewOutTy, LegalOp); // If the operation produces an invalid type, it must be custom lowered. Use // the target lowering hooks to expand it. Just keep the low part of the // expanded operation, we know that we're truncating anyway. 
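Side note: the i32 path of ExpandLegalINT_TO_FP just above builds the f64 entirely with integer stores: the input (XOR'd with 0x80000000 in the signed case, to bias it into unsigned range) becomes the low word of an 8-byte slot, 0x43300000 becomes the high word so the slot reads back as 2^52 plus the mapped value, and subtracting the bias constant (BitsToDouble(0x4330000000000000) or BitsToDouble(0x4330000080000000)) leaves the exact converted value. The same trick on a little-endian host:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// i32 -> f64 without an integer-to-FP instruction (little-endian word order).
double SIntToDouble(int32_t X) {
  uint32_t Mapped = uint32_t(X) ^ 0x80000000u;  // signed -> biased unsigned
  uint32_t Words[2] = {Mapped, 0x43300000u};    // {lo, hi} words of the slot
  double Load;
  std::memcpy(&Load, Words, sizeof(Load));      // read the slot as f64: 2^52 + Mapped
  uint64_t BiasBits = 0x4330000080000000ULL;    // 2^52 + 2^31
  double Bias;
  std::memcpy(&Bias, &BiasBits, sizeof(Bias));
  return Load - Bias;                           // exact signed result
}

int main() {
  std::printf("%g %g\n", SIntToDouble(-7), SIntToDouble(123456789));
  // -7 1.23457e+08
}
```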
if (getTypeAction(NewOutTy) == Expand) { - Operation = SDOperand(TLI.ReplaceNodeResults(Operation.Val, DAG), 0); + Operation = SDValue(TLI.ReplaceNodeResults(Operation.Val, DAG), 0); assert(Operation.Val && "Didn't return anything"); } @@ -5561,10 +5561,10 @@ SDOperand SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDOperand LegalOp, /// ExpandBSWAP - Open code the operations for BSWAP of the specified operation. /// -SDOperand SelectionDAGLegalize::ExpandBSWAP(SDOperand Op) { +SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op) { MVT VT = Op.getValueType(); MVT SHVT = TLI.getShiftAmountTy(); - SDOperand Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; + SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8; switch (VT.getSimpleVT()) { default: assert(0 && "Unhandled Expand type in BSWAP!"); abort(); case MVT::i16: @@ -5608,7 +5608,7 @@ SDOperand SelectionDAGLegalize::ExpandBSWAP(SDOperand Op) { /// ExpandBitCount - Expand the specified bitcount instruction into operations. /// -SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { +SDValue SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDValue Op) { switch (Opc) { default: assert(0 && "Cannot expand this yet!"); case ISD::CTPOP: { @@ -5622,8 +5622,8 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { unsigned len = VT.getSizeInBits(); for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8]) - SDOperand Tmp2 = DAG.getConstant(mask[i], VT); - SDOperand Tmp3 = DAG.getConstant(1ULL << i, ShVT); + SDValue Tmp2 = DAG.getConstant(mask[i], VT); + SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); Op = DAG.getNode(ISD::ADD, VT, DAG.getNode(ISD::AND, VT, Op, Tmp2), DAG.getNode(ISD::AND, VT, DAG.getNode(ISD::SRL, VT, Op, Tmp3),Tmp2)); @@ -5644,7 +5644,7 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { MVT ShVT = TLI.getShiftAmountTy(); unsigned len = VT.getSizeInBits(); for (unsigned i = 0; (1U << i) <= (len / 2); ++i) { - SDOperand Tmp3 = DAG.getConstant(1ULL << i, ShVT); + SDValue Tmp3 = DAG.getConstant(1ULL << i, ShVT); Op = DAG.getNode(ISD::OR, VT, Op, DAG.getNode(ISD::SRL, VT, Op, Tmp3)); } Op = DAG.getNode(ISD::XOR, VT, Op, DAG.getConstant(~0ULL, VT)); @@ -5656,8 +5656,8 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { // { return 32 - nlz(~x & (x-1)); } // see also http://www.hackersdelight.org/HDcode/ntz.cc MVT VT = Op.getValueType(); - SDOperand Tmp2 = DAG.getConstant(~0ULL, VT); - SDOperand Tmp3 = DAG.getNode(ISD::AND, VT, + SDValue Tmp2 = DAG.getConstant(~0ULL, VT); + SDValue Tmp3 = DAG.getNode(ISD::AND, VT, DAG.getNode(ISD::XOR, VT, Op, Tmp2), DAG.getNode(ISD::SUB, VT, Op, DAG.getConstant(1, VT))); // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. @@ -5671,12 +5671,12 @@ SDOperand SelectionDAGLegalize::ExpandBitCount(unsigned Opc, SDOperand Op) { } } -/// ExpandOp - Expand the specified SDOperand into its two component pieces +/// ExpandOp - Expand the specified SDValue into its two component pieces /// Lo&Hi. Note that the Op MUST be an expanded type. As a result of this, the /// LegalizeNodes map is filled in for any results that are not expanded, the /// ExpandedNodes map is filled in for any results that are expanded, and the /// Lo/Hi values are returned. 
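Side note: ExpandBitCount above open-codes all three bit-count nodes: CTPOP with the mask-and-add reduction (x = (x & mask) + ((x >> (1 << i)) & mask)), CTLZ by OR-smearing the leading set bit downwards and counting the zeros as CTPOP of the complement, and CTTZ using the identity quoted in the hunk's comment, ctpop(~x & (x - 1)). The same three routines for a 32-bit value:

```cpp
#include <cstdint>
#include <cstdio>

// CTPOP: pairwise sums over ever larger fields, as in the mask[] loop above.
unsigned CtPop32(uint32_t X) {
  const uint32_t Mask[] = {0x55555555u, 0x33333333u, 0x0F0F0F0Fu,
                           0x00FF00FFu, 0x0000FFFFu};
  for (unsigned i = 0; i != 5; ++i)
    X = (X & Mask[i]) + ((X >> (1u << i)) & Mask[i]);
  return X;
}

// CTLZ: smear the leading one rightwards, then count the remaining zeros.
unsigned CtLz32(uint32_t X) {
  for (unsigned i = 0; i != 5; ++i)
    X |= X >> (1u << i);
  return CtPop32(~X);
}

// CTTZ: ctpop(~x & (x - 1)) counts the zeros below the lowest set bit.
unsigned CtTz32(uint32_t X) {
  return CtPop32(~X & (X - 1));
}

int main() {
  std::printf("%u %u %u\n", CtPop32(0xF0F0u), CtLz32(0x00010000u),
              CtTz32(0x00010000u));              // 8 15 16
}
```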
-void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ +void SelectionDAGLegalize::ExpandOp(SDValue Op, SDValue &Lo, SDValue &Hi){ MVT VT = Op.getValueType(); MVT NVT = TLI.getTypeToTransformTo(VT); SDNode *Node = Op.Val; @@ -5685,7 +5685,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ VT.isVector()) && "Cannot expand to FP value or to larger int value!"); // See if we already expanded it. - DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator I + DenseMap<SDValue, std::pair<SDValue, SDValue> >::iterator I = ExpandedNodes.find(Op); if (I != ExpandedNodes.end()) { Lo = I->second.first; @@ -5700,10 +5700,10 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if (VT == MVT::ppcf128 && TLI.getOperationAction(ISD::FP_ROUND_INREG, VT) == TargetLowering::Custom) { - SDOperand SrcLo, SrcHi, Src; + SDValue SrcLo, SrcHi, Src; ExpandOp(Op.getOperand(0), SrcLo, SrcHi); Src = DAG.getNode(ISD::BUILD_PAIR, VT, SrcLo, SrcHi); - SDOperand Result = TLI.LowerOperation( + SDValue Result = TLI.LowerOperation( DAG.getNode(ISD::FP_ROUND_INREG, VT, Src, Op.getOperand(1)), DAG); assert(Result.Val->getOpcode() == ISD::BUILD_PAIR); Lo = Result.Val->getOperand(0); @@ -5787,7 +5787,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::BSWAP: { ExpandOp(Node->getOperand(0), Lo, Hi); - SDOperand TempLo = DAG.getNode(ISD::BSWAP, NVT, Hi); + SDValue TempLo = DAG.getNode(ISD::BSWAP, NVT, Hi); Hi = DAG.getNode(ISD::BSWAP, NVT, Lo); Lo = TempLo; break; @@ -5804,11 +5804,11 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::CTLZ: { // ctlz (HL) -> ctlz(H) != 32 ? ctlz(H) : (ctlz(L)+32) ExpandOp(Node->getOperand(0), Lo, Hi); - SDOperand BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT); - SDOperand HLZ = DAG.getNode(ISD::CTLZ, NVT, Hi); - SDOperand TopNotZero = DAG.getSetCC(TLI.getSetCCResultType(HLZ), HLZ, BitsC, + SDValue BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT); + SDValue HLZ = DAG.getNode(ISD::CTLZ, NVT, Hi); + SDValue TopNotZero = DAG.getSetCC(TLI.getSetCCResultType(HLZ), HLZ, BitsC, ISD::SETNE); - SDOperand LowPart = DAG.getNode(ISD::CTLZ, NVT, Lo); + SDValue LowPart = DAG.getNode(ISD::CTLZ, NVT, Lo); LowPart = DAG.getNode(ISD::ADD, NVT, LowPart, BitsC); Lo = DAG.getNode(ISD::SELECT, NVT, TopNotZero, HLZ, LowPart); @@ -5819,11 +5819,11 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::CTTZ: { // cttz (HL) -> cttz(L) != 32 ? cttz(L) : (cttz(H)+32) ExpandOp(Node->getOperand(0), Lo, Hi); - SDOperand BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT); - SDOperand LTZ = DAG.getNode(ISD::CTTZ, NVT, Lo); - SDOperand BotNotZero = DAG.getSetCC(TLI.getSetCCResultType(LTZ), LTZ, BitsC, + SDValue BitsC = DAG.getConstant(NVT.getSizeInBits(), NVT); + SDValue LTZ = DAG.getNode(ISD::CTTZ, NVT, Lo); + SDValue BotNotZero = DAG.getSetCC(TLI.getSetCCResultType(LTZ), LTZ, BitsC, ISD::SETNE); - SDOperand HiPart = DAG.getNode(ISD::CTTZ, NVT, Hi); + SDValue HiPart = DAG.getNode(ISD::CTTZ, NVT, Hi); HiPart = DAG.getNode(ISD::ADD, NVT, HiPart, BitsC); Lo = DAG.getNode(ISD::SELECT, NVT, BotNotZero, LTZ, HiPart); @@ -5832,8 +5832,8 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ } case ISD::VAARG: { - SDOperand Ch = Node->getOperand(0); // Legalize the chain. - SDOperand Ptr = Node->getOperand(1); // Legalize the pointer. 
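Side note: ExpandOp above handles the double-width forms by delegating to the narrow operations: BSWAP of an expanded value byte-swaps each half and swaps the halves, and the CTLZ/CTTZ cases implement the identities in the comments, ctlz(HL) = ctlz(H) != 32 ? ctlz(H) : ctlz(L) + 32, with the mirror-image rule for cttz. On 32-bit halves of a 64-bit value (helper names and the loop-based ctlz are illustrative):

```cpp
#include <cstdint>
#include <cstdio>

static uint32_t BSwap32(uint32_t X) {           // narrow byte swap helper
  return (X << 24) | ((X << 8) & 0x00FF0000u) |
         ((X >> 8) & 0x0000FF00u) | (X >> 24);
}

static unsigned CtLz32(uint32_t X) {            // any 32-bit ctlz works here
  unsigned N = 32;
  while (X) { X >>= 1; --N; }
  return N;
}

// ctlz(HL) -> ctlz(H) != 32 ? ctlz(H) : (ctlz(L) + 32), as in the hunk.
unsigned CtLz64(uint32_t Lo, uint32_t Hi) {
  unsigned HLZ = CtLz32(Hi);
  return HLZ != 32 ? HLZ : CtLz32(Lo) + 32;
}

// BSWAP of the pair: each half is byte-swapped and the halves trade places.
void BSwap64(uint32_t &Lo, uint32_t &Hi) {
  uint32_t TempLo = BSwap32(Hi);
  Hi = BSwap32(Lo);
  Lo = TempLo;
}

int main() {
  uint32_t Lo = 0x11223344u, Hi = 0x55667788u;
  BSwap64(Lo, Hi);
  std::printf("%08x %08x %u\n", Hi, Lo, CtLz64(0x80u, 0));
  // 44332211 88776655 56
}
```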
+ SDValue Ch = Node->getOperand(0); // Legalize the chain. + SDValue Ptr = Node->getOperand(1); // Legalize the pointer. Lo = DAG.getVAArg(NVT, Ch, Ptr, Node->getOperand(2)); Hi = DAG.getVAArg(NVT, Lo.getValue(1), Ptr, Node->getOperand(2)); @@ -5847,8 +5847,8 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::LOAD: { LoadSDNode *LD = cast<LoadSDNode>(Node); - SDOperand Ch = LD->getChain(); // Legalize the chain. - SDOperand Ptr = LD->getBasePtr(); // Legalize the pointer. + SDValue Ch = LD->getChain(); // Legalize the chain. + SDValue Ptr = LD->getBasePtr(); // Legalize the pointer. ISD::LoadExtType ExtType = LD->getExtensionType(); int SVOffset = LD->getSrcValueOffset(); unsigned Alignment = LD->getAlignment(); @@ -5860,7 +5860,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if (VT == MVT::f32 || VT == MVT::f64) { // f32->i32 or f64->i64 one to one expansion. // Remember that we legalized the chain. - AddLegalizedOperand(SDOperand(Node, 1), LegalizeOp(Lo.getValue(1))); + AddLegalizedOperand(SDValue(Node, 1), LegalizeOp(Lo.getValue(1))); // Recursively expand the new load. if (getTypeAction(NVT) == Expand) ExpandOp(Lo, Lo, Hi); @@ -5878,7 +5878,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ // Build a factor node to remember that this load is independent of the // other one. - SDOperand TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1), + SDValue TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1), Hi.getValue(1)); // Remember that we legalized the chain. @@ -5891,10 +5891,10 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if ((VT == MVT::f64 && EVT == MVT::f32) || (VT == MVT::ppcf128 && (EVT==MVT::f64 || EVT==MVT::f32))) { // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND - SDOperand Load = DAG.getLoad(EVT, Ch, Ptr, LD->getSrcValue(), + SDValue Load = DAG.getLoad(EVT, Ch, Ptr, LD->getSrcValue(), SVOffset, isVolatile, Alignment); // Remember that we legalized the chain. - AddLegalizedOperand(SDOperand(Node, 1), LegalizeOp(Load.getValue(1))); + AddLegalizedOperand(SDValue(Node, 1), LegalizeOp(Load.getValue(1))); ExpandOp(DAG.getNode(ISD::FP_EXTEND, VT, Load), Lo, Hi); break; } @@ -5908,7 +5908,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ Alignment); // Remember that we legalized the chain. - AddLegalizedOperand(SDOperand(Node, 1), LegalizeOp(Lo.getValue(1))); + AddLegalizedOperand(SDValue(Node, 1), LegalizeOp(Lo.getValue(1))); if (ExtType == ISD::SEXTLOAD) { // The high part is obtained by SRA'ing all but one of the bits of the @@ -5929,7 +5929,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::AND: case ISD::OR: case ISD::XOR: { // Simple logical operators -> two trivial pieces. 
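The BSWAP/CTLZ/CTTZ cases of ExpandOp above treat an illegal 64-bit value as a (Lo, Hi) pair of legal 32-bit values. A minimal host-side sketch of the same combining logic, with GCC/Clang builtins standing in for the legal 32-bit nodes (the zero guards are needed because the builtins leave a zero input undefined, while the DAG nodes return the bit width):

```cpp
#include <cstdint>

struct Pair { uint32_t Lo, Hi; };               // an expanded 64-bit value

// BSWAP: byte-swap each half, then swap the halves.
static Pair bswap64(Pair V) {
  return { __builtin_bswap32(V.Hi), __builtin_bswap32(V.Lo) };
}

// ctlz(HL) -> ctlz(H) != 32 ? ctlz(H) : ctlz(L) + 32
static uint32_t ctlz64(Pair V) {
  uint32_t HLZ = V.Hi ? __builtin_clz(V.Hi) : 32;
  return HLZ != 32 ? HLZ : (V.Lo ? __builtin_clz(V.Lo) : 32) + 32;
}

// cttz(HL) -> cttz(L) != 32 ? cttz(L) : cttz(H) + 32
static uint32_t cttz64(Pair V) {
  uint32_t LTZ = V.Lo ? __builtin_ctz(V.Lo) : 32;
  return LTZ != 32 ? LTZ : (V.Hi ? __builtin_ctz(V.Hi) : 32) + 32;
}
```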
- SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; ExpandOp(Node->getOperand(0), LL, LH); ExpandOp(Node->getOperand(1), RL, RH); Lo = DAG.getNode(Node->getOpcode(), NVT, LL, RL); @@ -5937,7 +5937,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ break; } case ISD::SELECT: { - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; ExpandOp(Node->getOperand(1), LL, LH); ExpandOp(Node->getOperand(2), RL, RH); if (getTypeAction(NVT) == Expand) @@ -5948,7 +5948,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ break; } case ISD::SELECT_CC: { - SDOperand TL, TH, FL, FH; + SDValue TL, TH, FL, FH; ExpandOp(Node->getOperand(2), TL, TH); ExpandOp(Node->getOperand(3), FL, FH); if (getTypeAction(NVT) == Expand) @@ -5989,7 +5989,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::TRUNCATE: { // The input value must be larger than this value. Expand *it*. - SDOperand NewLo; + SDValue NewLo; ExpandOp(Node->getOperand(0), NewLo, Hi); // The low part is now either the right size, or it is closer. If not the @@ -6001,7 +6001,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ } case ISD::BIT_CONVERT: { - SDOperand Tmp; + SDValue Tmp; if (TLI.getOperationAction(ISD::BIT_CONVERT, VT) == TargetLowering::Custom){ // If the target wants to, allow it to lower this itself. switch (getTypeAction(Node->getOperand(0).getValueType())) { @@ -6040,19 +6040,19 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ assert(TLI.getOperationAction(ISD::READCYCLECOUNTER, VT) == TargetLowering::Custom && "Must custom expand ReadCycleCounter"); - SDOperand Tmp = TLI.LowerOperation(Op, DAG); + SDValue Tmp = TLI.LowerOperation(Op, DAG); assert(Tmp.Val && "Node must be custom expanded!"); ExpandOp(Tmp.getValue(0), Lo, Hi); - AddLegalizedOperand(SDOperand(Node, 1), // Remember we legalized the chain. + AddLegalizedOperand(SDValue(Node, 1), // Remember we legalized the chain. LegalizeOp(Tmp.getValue(1))); break; } case ISD::ATOMIC_CMP_SWAP: { - SDOperand Tmp = TLI.LowerOperation(Op, DAG); + SDValue Tmp = TLI.LowerOperation(Op, DAG); assert(Tmp.Val && "Node must be custom expanded!"); ExpandOp(Tmp.getValue(0), Lo, Hi); - AddLegalizedOperand(SDOperand(Node, 1), // Remember we legalized the chain. + AddLegalizedOperand(SDValue(Node, 1), // Remember we legalized the chain. LegalizeOp(Tmp.getValue(1))); break; } @@ -6063,7 +6063,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ // library functions. case ISD::FP_TO_SINT: { if (TLI.getOperationAction(ISD::FP_TO_SINT, VT) == TargetLowering::Custom) { - SDOperand Op; + SDValue Op; switch (getTypeAction(Node->getOperand(0).getValueType())) { case Expand: assert(0 && "cannot expand FP!"); case Legal: Op = LegalizeOp(Node->getOperand(0)); break; @@ -6089,7 +6089,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::FP_TO_UINT: { if (TLI.getOperationAction(ISD::FP_TO_UINT, VT) == TargetLowering::Custom) { - SDOperand Op; + SDValue Op; switch (getTypeAction(Node->getOperand(0).getValueType())) { case Expand: assert(0 && "cannot expand FP!"); case Legal: Op = LegalizeOp(Node->getOperand(0)); break; @@ -6114,9 +6114,9 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::SHL: { // If the target wants custom lowering, do so. 
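The AND/OR/XOR and SELECT cases above are the trivial end of this switch: bitwise ops and selects never move information between the two halves, so each half of the result is computed independently from the corresponding halves of the operands. A one-function sketch of that shape (the shift cases that follow cannot be split this way, since bits cross the Lo/Hi boundary, which is why they go through custom lowering or a part-wise shift expansion instead):

```cpp
#include <cstdint>

// AND of an expanded 64-bit value: the halves are combined independently.
// OR, XOR and SELECT follow exactly the same pattern.
static void and64(uint32_t LL, uint32_t LH, uint32_t RL, uint32_t RH,
                  uint32_t &Lo, uint32_t &Hi) {
  Lo = LL & RL;
  Hi = LH & RH;
}
```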
- SDOperand ShiftAmt = LegalizeOp(Node->getOperand(1)); + SDValue ShiftAmt = LegalizeOp(Node->getOperand(1)); if (TLI.getOperationAction(ISD::SHL, VT) == TargetLowering::Custom) { - SDOperand Op = DAG.getNode(ISD::SHL, VT, Node->getOperand(0), ShiftAmt); + SDValue Op = DAG.getNode(ISD::SHL, VT, Node->getOperand(0), ShiftAmt); Op = TLI.LowerOperation(Op, DAG); if (Op.Val) { // Now that the custom expander is done, expand the result, which is @@ -6131,7 +6131,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(ShiftAmt)) { if (ShAmt->getAPIntValue() == 1 && TLI.isOperationLegal(ISD::ADDC, NVT) && TLI.isOperationLegal(ISD::ADDE, NVT)) { - SDOperand LoOps[2], HiOps[3]; + SDValue LoOps[2], HiOps[3]; ExpandOp(Node->getOperand(0), LoOps[0], HiOps[0]); SDVTList VTList = DAG.getVTList(LoOps[0].getValueType(), MVT::Flag); LoOps[1] = LoOps[0]; @@ -6164,9 +6164,9 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::SRA: { // If the target wants custom lowering, do so. - SDOperand ShiftAmt = LegalizeOp(Node->getOperand(1)); + SDValue ShiftAmt = LegalizeOp(Node->getOperand(1)); if (TLI.getOperationAction(ISD::SRA, VT) == TargetLowering::Custom) { - SDOperand Op = DAG.getNode(ISD::SRA, VT, Node->getOperand(0), ShiftAmt); + SDValue Op = DAG.getNode(ISD::SRA, VT, Node->getOperand(0), ShiftAmt); Op = TLI.LowerOperation(Op, DAG); if (Op.Val) { // Now that the custom expander is done, expand the result, which is @@ -6196,9 +6196,9 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::SRL: { // If the target wants custom lowering, do so. - SDOperand ShiftAmt = LegalizeOp(Node->getOperand(1)); + SDValue ShiftAmt = LegalizeOp(Node->getOperand(1)); if (TLI.getOperationAction(ISD::SRL, VT) == TargetLowering::Custom) { - SDOperand Op = DAG.getNode(ISD::SRL, VT, Node->getOperand(0), ShiftAmt); + SDValue Op = DAG.getNode(ISD::SRL, VT, Node->getOperand(0), ShiftAmt); Op = TLI.LowerOperation(Op, DAG); if (Op.Val) { // Now that the custom expander is done, expand the result, which is @@ -6231,7 +6231,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ // If the target wants to custom expand this, let them. if (TLI.getOperationAction(Node->getOpcode(), VT) == TargetLowering::Custom) { - SDOperand Result = TLI.LowerOperation(Op, DAG); + SDValue Result = TLI.LowerOperation(Op, DAG); if (Result.Val) { ExpandOp(Result, Lo, Hi); break; @@ -6239,11 +6239,11 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ } // Expand the subcomponents. - SDOperand LHSL, LHSH, RHSL, RHSH; + SDValue LHSL, LHSH, RHSL, RHSH; ExpandOp(Node->getOperand(0), LHSL, LHSH); ExpandOp(Node->getOperand(1), RHSL, RHSH); SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag); - SDOperand LoOps[2], HiOps[3]; + SDValue LoOps[2], HiOps[3]; LoOps[0] = LHSL; LoOps[1] = RHSL; HiOps[0] = LHSH; @@ -6263,12 +6263,12 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::ADDC: case ISD::SUBC: { // Expand the subcomponents. 
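The ADD/SUB expansion at the end of this hunk wires the two halves together with a carry: ADDC produces the low half plus a carry flag, and ADDE consumes that flag for the high half. The same pair is what the SHL case above falls back to when the shift amount is the constant 1, since x << 1 is just x + x. A standalone sketch of the arithmetic with the carry modelled explicitly:

```cpp
#include <cstdint>

// 64-bit ADD from 32-bit halves: Lo via ADDC (produces a carry),
// Hi via ADDE (consumes it).
static void add64(uint32_t LL, uint32_t LH, uint32_t RL, uint32_t RH,
                  uint32_t &Lo, uint32_t &Hi) {
  Lo = LL + RL;
  uint32_t Carry = Lo < LL;   // carry-out of the low addition
  Hi = LH + RH + Carry;
}
```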
- SDOperand LHSL, LHSH, RHSL, RHSH; + SDValue LHSL, LHSH, RHSL, RHSH; ExpandOp(Node->getOperand(0), LHSL, LHSH); ExpandOp(Node->getOperand(1), RHSL, RHSH); SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag); - SDOperand LoOps[2] = { LHSL, RHSL }; - SDOperand HiOps[3] = { LHSH, RHSH }; + SDValue LoOps[2] = { LHSL, RHSL }; + SDValue HiOps[3] = { LHSH, RHSH }; if (Node->getOpcode() == ISD::ADDC) { Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2); @@ -6286,12 +6286,12 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::ADDE: case ISD::SUBE: { // Expand the subcomponents. - SDOperand LHSL, LHSH, RHSL, RHSH; + SDValue LHSL, LHSH, RHSL, RHSH; ExpandOp(Node->getOperand(0), LHSL, LHSH); ExpandOp(Node->getOperand(1), RHSL, RHSH); SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag); - SDOperand LoOps[3] = { LHSL, RHSL, Node->getOperand(2) }; - SDOperand HiOps[3] = { LHSH, RHSH }; + SDValue LoOps[3] = { LHSL, RHSL, Node->getOperand(2) }; + SDValue HiOps[3] = { LHSH, RHSH }; Lo = DAG.getNode(Node->getOpcode(), VTList, LoOps, 3); HiOps[2] = Lo.getValue(1); @@ -6304,7 +6304,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ case ISD::MUL: { // If the target wants to custom expand this, let them. if (TLI.getOperationAction(ISD::MUL, VT) == TargetLowering::Custom) { - SDOperand New = TLI.LowerOperation(Op, DAG); + SDValue New = TLI.LowerOperation(Op, DAG); if (New.Val) { ExpandOp(New, Lo, Hi); break; @@ -6316,7 +6316,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ bool HasSMUL_LOHI = TLI.isOperationLegal(ISD::SMUL_LOHI, NVT); bool HasUMUL_LOHI = TLI.isOperationLegal(ISD::UMUL_LOHI, NVT); if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) { - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; ExpandOp(Node->getOperand(0), LL, LH); ExpandOp(Node->getOperand(1), RL, RH); unsigned OuterBitSize = Op.getValueSizeInBits(); @@ -6330,7 +6330,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if (HasUMUL_LOHI) { // We can emit a umul_lohi. Lo = DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL); - Hi = SDOperand(Lo.Val, 1); + Hi = SDValue(Lo.Val, 1); break; } if (HasMULHU) { @@ -6345,7 +6345,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ if (HasSMUL_LOHI) { // We can emit a smul_lohi. Lo = DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL); - Hi = SDOperand(Lo.Val, 1); + Hi = SDValue(Lo.Val, 1); break; } if (HasMULHS) { @@ -6357,7 +6357,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ } if (HasUMUL_LOHI) { // Lo,Hi = umul LHS, RHS. - SDOperand UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, + SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL); Lo = UMulLOHI; Hi = UMulLOHI.getValue(1); @@ -6478,7 +6478,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ } case ISD::FABS: { if (VT == MVT::ppcf128) { - SDOperand Tmp; + SDValue Tmp; ExpandOp(Node->getOperand(0), Lo, Tmp); Hi = DAG.getNode(ISD::FABS, NVT, Tmp); // lo = hi==fabs(hi) ? lo : -lo; @@ -6487,7 +6487,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ DAG.getCondCode(ISD::SETEQ)); break; } - SDOperand Mask = (VT == MVT::f64) + SDValue Mask = (VT == MVT::f64) ? 
DAG.getConstantFP(BitsToDouble(~(1ULL << 63)), VT) : DAG.getConstantFP(BitsToFloat(~(1U << 31)), VT); Mask = DAG.getNode(ISD::BIT_CONVERT, NVT, Mask); @@ -6504,7 +6504,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ Hi = DAG.getNode(ISD::FNEG, MVT::f64, Hi); break; } - SDOperand Mask = (VT == MVT::f64) + SDValue Mask = (VT == MVT::f64) ? DAG.getConstantFP(BitsToDouble(1ULL << 63), VT) : DAG.getConstantFP(BitsToFloat(1U << 31), VT); Mask = DAG.getNode(ISD::BIT_CONVERT, NVT, Mask); @@ -6528,7 +6528,7 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ // Promote the operand if needed. Do this before checking for // ppcf128 so conversions of i16 and i8 work. if (getTypeAction(SrcVT) == Promote) { - SDOperand Tmp = PromoteOp(Node->getOperand(0)); + SDValue Tmp = PromoteOp(Node->getOperand(0)); Tmp = isSigned ? DAG.getNode(ISD::SIGN_EXTEND_INREG, Tmp.getValueType(), Tmp, DAG.getValueType(SrcVT)) @@ -6607,8 +6607,8 @@ void SelectionDAGLegalize::ExpandOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi){ /// SplitVectorOp - Given an operand of vector type, break it down into /// two smaller values, still of vector type. -void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, - SDOperand &Hi) { +void SelectionDAGLegalize::SplitVectorOp(SDValue Op, SDValue &Lo, + SDValue &Hi) { assert(Op.getValueType().isVector() && "Cannot split non-vector type!"); SDNode *Node = Op.Val; unsigned NumElements = Op.getValueType().getVectorNumElements(); @@ -6623,7 +6623,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, MVT NewVT_Hi = MVT::getVectorVT(NewEltVT, NewNumElts_Hi); // See if we already split it. - std::map<SDOperand, std::pair<SDOperand, SDOperand> >::iterator I + std::map<SDValue, std::pair<SDValue, SDValue> >::iterator I = SplitNodes.find(Op); if (I != SplitNodes.end()) { Lo = I->second.first; @@ -6649,7 +6649,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, if (ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Node->getOperand(2))) { SplitVectorOp(Node->getOperand(0), Lo, Hi); unsigned Index = Idx->getValue(); - SDOperand ScalarOp = Node->getOperand(1); + SDValue ScalarOp = Node->getOperand(1); if (Index < NewNumElts_Lo) Lo = DAG.getNode(ISD::INSERT_VECTOR_ELT, NewVT_Lo, Lo, ScalarOp, DAG.getIntPtrConstant(Index)); @@ -6658,7 +6658,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, DAG.getIntPtrConstant(Index - NewNumElts_Lo)); break; } - SDOperand Tmp = PerformInsertVectorEltInMemory(Node->getOperand(0), + SDValue Tmp = PerformInsertVectorEltInMemory(Node->getOperand(0), Node->getOperand(1), Node->getOperand(2)); SplitVectorOp(Tmp, Lo, Hi); @@ -6666,21 +6666,21 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, } case ISD::VECTOR_SHUFFLE: { // Build the low part. - SDOperand Mask = Node->getOperand(2); - SmallVector<SDOperand, 8> Ops; + SDValue Mask = Node->getOperand(2); + SmallVector<SDValue, 8> Ops; MVT PtrVT = TLI.getPointerTy(); // Insert all of the elements from the input that are needed. We use // buildvector of extractelement here because the input vectors will have // to be legalized, so this makes the code simpler. 
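Stepping back to the MUL case a little further up: when UMUL_LOHI or MULHU is legal at the new type, the 64-bit product is assembled from 32-bit pieces, and the cross terms can only affect the high half. A sketch of that shape (the uint64_t product stands in for UMUL_LOHI; the real code additionally uses known-zero bits to drop terms when the operands are known to fit in the inner type):

```cpp
#include <cstdint>

static void mul64(uint32_t LL, uint32_t LH, uint32_t RL, uint32_t RH,
                  uint32_t &Lo, uint32_t &Hi) {
  uint64_t LowProduct = (uint64_t)LL * RL;   // what UMUL_LOHI returns as (Lo, Hi)
  Lo = (uint32_t)LowProduct;
  Hi = (uint32_t)(LowProduct >> 32)          // high half of the low product
       + LL * RH + LH * RL;                  // cross terms, truncated to 32 bits
}
```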
for (unsigned i = 0; i != NewNumElts_Lo; ++i) { - SDOperand IdxNode = Mask.getOperand(i); + SDValue IdxNode = Mask.getOperand(i); if (IdxNode.getOpcode() == ISD::UNDEF) { Ops.push_back(DAG.getNode(ISD::UNDEF, NewEltVT)); continue; } unsigned Idx = cast<ConstantSDNode>(IdxNode)->getValue(); - SDOperand InVec = Node->getOperand(0); + SDValue InVec = Node->getOperand(0); if (Idx >= NumElements) { InVec = Node->getOperand(1); Idx -= NumElements; @@ -6692,13 +6692,13 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, Ops.clear(); for (unsigned i = NewNumElts_Lo; i != NumElements; ++i) { - SDOperand IdxNode = Mask.getOperand(i); + SDValue IdxNode = Mask.getOperand(i); if (IdxNode.getOpcode() == ISD::UNDEF) { Ops.push_back(DAG.getNode(ISD::UNDEF, NewEltVT)); continue; } unsigned Idx = cast<ConstantSDNode>(IdxNode)->getValue(); - SDOperand InVec = Node->getOperand(0); + SDValue InVec = Node->getOperand(0); if (Idx >= NumElements) { InVec = Node->getOperand(1); Idx -= NumElements; @@ -6710,11 +6710,11 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, break; } case ISD::BUILD_VECTOR: { - SmallVector<SDOperand, 8> LoOps(Node->op_begin(), + SmallVector<SDValue, 8> LoOps(Node->op_begin(), Node->op_begin()+NewNumElts_Lo); Lo = DAG.getNode(ISD::BUILD_VECTOR, NewVT_Lo, &LoOps[0], LoOps.size()); - SmallVector<SDOperand, 8> HiOps(Node->op_begin()+NewNumElts_Lo, + SmallVector<SDValue, 8> HiOps(Node->op_begin()+NewNumElts_Lo, Node->op_end()); Hi = DAG.getNode(ISD::BUILD_VECTOR, NewVT_Hi, &HiOps[0], HiOps.size()); break; @@ -6726,26 +6726,26 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, Lo = Node->getOperand(0); Hi = Node->getOperand(1); } else { - SmallVector<SDOperand, 8> LoOps(Node->op_begin(), + SmallVector<SDValue, 8> LoOps(Node->op_begin(), Node->op_begin()+NewNumSubvectors); Lo = DAG.getNode(ISD::CONCAT_VECTORS, NewVT_Lo, &LoOps[0], LoOps.size()); - SmallVector<SDOperand, 8> HiOps(Node->op_begin()+NewNumSubvectors, + SmallVector<SDValue, 8> HiOps(Node->op_begin()+NewNumSubvectors, Node->op_end()); Hi = DAG.getNode(ISD::CONCAT_VECTORS, NewVT_Hi, &HiOps[0], HiOps.size()); } break; } case ISD::SELECT: { - SDOperand Cond = Node->getOperand(0); + SDValue Cond = Node->getOperand(0); - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; SplitVectorOp(Node->getOperand(1), LL, LH); SplitVectorOp(Node->getOperand(2), RL, RH); if (Cond.getValueType().isVector()) { // Handle a vector merge. 
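The VECTOR_SHUFFLE splitting above rebuilds each half of the result as a BUILD_VECTOR of EXTRACT_VECTOR_ELTs, with mask indices at or beyond the element count redirected into the second input. A scalar model of one half (undef mask entries map to an arbitrary value, mirroring the ISD::UNDEF elements emitted above):

```cpp
#include <cstddef>
#include <vector>

static std::vector<int> shuffleHalf(const std::vector<int> &In0,
                                    const std::vector<int> &In1,
                                    const std::vector<int> &Mask,
                                    size_t Begin, size_t End) {
  std::vector<int> Out;
  for (size_t i = Begin; i != End; ++i) {
    int Idx = Mask[i];
    if (Idx < 0) { Out.push_back(0); continue; }   // "undef" mask entry
    size_t N = In0.size();
    Out.push_back((size_t)Idx < N ? In0[(size_t)Idx] : In1[(size_t)Idx - N]);
  }
  return Out;
}
```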
- SDOperand CL, CH; + SDValue CL, CH; SplitVectorOp(Cond, CL, CH); Lo = DAG.getNode(Node->getOpcode(), NewVT_Lo, CL, LL, RL); Hi = DAG.getNode(Node->getOpcode(), NewVT_Hi, CH, LH, RH); @@ -6757,11 +6757,11 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, break; } case ISD::SELECT_CC: { - SDOperand CondLHS = Node->getOperand(0); - SDOperand CondRHS = Node->getOperand(1); - SDOperand CondCode = Node->getOperand(4); + SDValue CondLHS = Node->getOperand(0); + SDValue CondRHS = Node->getOperand(1); + SDValue CondCode = Node->getOperand(4); - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; SplitVectorOp(Node->getOperand(2), LL, LH); SplitVectorOp(Node->getOperand(3), RL, RH); @@ -6773,7 +6773,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, break; } case ISD::VSETCC: { - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; SplitVectorOp(Node->getOperand(0), LL, LH); SplitVectorOp(Node->getOperand(1), RL, RH); Lo = DAG.getNode(ISD::VSETCC, NewVT_Lo, LL, RL, Node->getOperand(2)); @@ -6796,7 +6796,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, case ISD::UREM: case ISD::SREM: case ISD::FREM: { - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; SplitVectorOp(Node->getOperand(0), LL, LH); SplitVectorOp(Node->getOperand(1), RL, RH); @@ -6805,7 +6805,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, break; } case ISD::FPOWI: { - SDOperand L, H; + SDValue L, H; SplitVectorOp(Node->getOperand(0), L, H); Lo = DAG.getNode(Node->getOpcode(), NewVT_Lo, L, Node->getOperand(1)); @@ -6824,7 +6824,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, case ISD::FP_TO_UINT: case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: { - SDOperand L, H; + SDValue L, H; SplitVectorOp(Node->getOperand(0), L, H); Lo = DAG.getNode(Node->getOpcode(), NewVT_Lo, L); @@ -6833,8 +6833,8 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, } case ISD::LOAD: { LoadSDNode *LD = cast<LoadSDNode>(Node); - SDOperand Ch = LD->getChain(); - SDOperand Ptr = LD->getBasePtr(); + SDValue Ch = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); const Value *SV = LD->getSrcValue(); int SVOffset = LD->getSrcValueOffset(); unsigned Alignment = LD->getAlignment(); @@ -6850,7 +6850,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, // Build a factor node to remember that this load is independent of the // other one. - SDOperand TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1), + SDValue TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1), Hi.getValue(1)); // Remember that we legalized the chain. @@ -6860,7 +6860,7 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, case ISD::BIT_CONVERT: { // We know the result is a vector. The input may be either a vector or a // scalar value. - SDOperand InOp = Node->getOperand(0); + SDValue InOp = Node->getOperand(0); if (!InOp.getValueType().isVector() || InOp.getValueType().getVectorNumElements() == 1) { // The input is a scalar or single-element vector. @@ -6868,10 +6868,10 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, // FIXME: this could be improved probably. 
unsigned LdAlign = TLI.getTargetData()->getPrefTypeAlignment( Op.getValueType().getTypeForMVT()); - SDOperand Ptr = DAG.CreateStackTemporary(InOp.getValueType(), LdAlign); + SDValue Ptr = DAG.CreateStackTemporary(InOp.getValueType(), LdAlign); int FI = cast<FrameIndexSDNode>(Ptr.Val)->getIndex(); - SDOperand St = DAG.getStore(DAG.getEntryNode(), + SDValue St = DAG.getStore(DAG.getEntryNode(), InOp, Ptr, PseudoSourceValue::getFixedStack(FI), 0); InOp = DAG.getLoad(Op.getValueType(), St, Ptr, @@ -6895,17 +6895,17 @@ void SelectionDAGLegalize::SplitVectorOp(SDOperand Op, SDOperand &Lo, /// ScalarizeVectorOp - Given an operand of single-element vector type /// (e.g. v1f32), convert it into the equivalent operation that returns a /// scalar (e.g. f32) value. -SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) { +SDValue SelectionDAGLegalize::ScalarizeVectorOp(SDValue Op) { assert(Op.getValueType().isVector() && "Bad ScalarizeVectorOp invocation!"); SDNode *Node = Op.Val; MVT NewVT = Op.getValueType().getVectorElementType(); assert(Op.getValueType().getVectorNumElements() == 1); // See if we already scalarized it. - std::map<SDOperand, SDOperand>::iterator I = ScalarizedNodes.find(Op); + std::map<SDValue, SDValue>::iterator I = ScalarizedNodes.find(Op); if (I != ScalarizedNodes.end()) return I->second; - SDOperand Result; + SDValue Result; switch (Node->getOpcode()) { default: #ifndef NDEBUG @@ -6950,8 +6950,8 @@ SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) { break; case ISD::LOAD: { LoadSDNode *LD = cast<LoadSDNode>(Node); - SDOperand Ch = LegalizeOp(LD->getChain()); // Legalize the chain. - SDOperand Ptr = LegalizeOp(LD->getBasePtr()); // Legalize the pointer. + SDValue Ch = LegalizeOp(LD->getChain()); // Legalize the chain. + SDValue Ptr = LegalizeOp(LD->getBasePtr()); // Legalize the pointer. const Value *SV = LD->getSrcValue(); int SVOffset = LD->getSrcValueOffset(); @@ -6976,7 +6976,7 @@ SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) { break; case ISD::VECTOR_SHUFFLE: { // Figure out if the scalar is the LHS or RHS and return it. 
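The BIT_CONVERT case that closes SplitVectorOp above handles a scalar (or single-element vector) input by storing it to a stack temporary and reloading it with the vector type before splitting the loaded vector. In ordinary C++ the same round-trip through memory is just two memcpys; the f64-to-two-i32 case looks roughly like this (element order is endian-dependent, which the stack-slot trick gets right for free):

```cpp
#include <cstdint>
#include <cstring>

static void bitConvertViaStack(double In, uint32_t &Lo, uint32_t &Hi) {
  unsigned char Slot[sizeof(double)];       // the "stack temporary"
  std::memcpy(Slot, &In, sizeof In);        // store with the original type
  uint32_t Parts[2];
  std::memcpy(Parts, Slot, sizeof Parts);   // reload with the target layout
  Lo = Parts[0];
  Hi = Parts[1];
}
```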
- SDOperand EltNum = Node->getOperand(2).getOperand(0); + SDValue EltNum = Node->getOperand(2).getOperand(0); if (cast<ConstantSDNode>(EltNum)->getValue()) Result = ScalarizeVectorOp(Node->getOperand(1)); else @@ -6988,7 +6988,7 @@ SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) { assert(Result.getValueType() == NewVT); break; case ISD::BIT_CONVERT: { - SDOperand Op0 = Op.getOperand(0); + SDValue Op0 = Op.getOperand(0); if (Op0.getValueType().getVectorNumElements() == 1) Op0 = ScalarizeVectorOp(Op0); Result = DAG.getNode(ISD::BIT_CONVERT, NewVT, Op0); @@ -7007,8 +7007,8 @@ SDOperand SelectionDAGLegalize::ScalarizeVectorOp(SDOperand Op) { Node->getOperand(4)); break; case ISD::VSETCC: { - SDOperand Op0 = ScalarizeVectorOp(Op.getOperand(0)); - SDOperand Op1 = ScalarizeVectorOp(Op.getOperand(1)); + SDValue Op0 = ScalarizeVectorOp(Op.getOperand(0)); + SDValue Op1 = ScalarizeVectorOp(Op.getOperand(1)); Result = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(Op0), Op0, Op1, Op.getOperand(2)); Result = DAG.getNode(ISD::SELECT, NewVT, Result, diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index e50ff1c991..15d0838598 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -43,7 +43,7 @@ static RTLIB::Libcall GetFPLibCall(MVT VT, void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) { DEBUG(cerr << "Soften float result " << ResNo << ": "; N->dump(&DAG); cerr << "\n"); - SDOperand R = SDOperand(); + SDValue R = SDValue(); switch (N->getOpcode()) { default: @@ -76,14 +76,14 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) { // If R is null, the sub-method took care of registering the result. if (R.Val) - SetSoftenedFloat(SDOperand(N, ResNo), R); + SetSoftenedFloat(SDValue(N, ResNo), R); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) { return BitConvertToInteger(N->getOperand(0)); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_BUILD_PAIR(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_BUILD_PAIR(SDNode *N) { // Convert the inputs to integers, and build a new pair out of them. 
return DAG.getNode(ISD::BUILD_PAIR, TLI.getTypeToTransformTo(N->getValueType(0)), @@ -91,14 +91,14 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_BUILD_PAIR(SDNode *N) { BitConvertToInteger(N->getOperand(1))); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_ConstantFP(ConstantFPSDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_ConstantFP(ConstantFPSDNode *N) { return DAG.getConstant(N->getValueAPF().convertToAPInt(), TLI.getTypeToTransformTo(N->getValueType(0))); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FADD(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_FADD(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)), + SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), GetSoftenedFloat(N->getOperand(1)) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::ADD_F32, @@ -108,9 +108,9 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FADD(SDNode *N) { NVT, Ops, 2, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) { - SDOperand LHS = GetSoftenedFloat(N->getOperand(0)); - SDOperand RHS = BitConvertToInteger(N->getOperand(1)); +SDValue DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) { + SDValue LHS = GetSoftenedFloat(N->getOperand(0)); + SDValue RHS = BitConvertToInteger(N->getOperand(1)); MVT LVT = LHS.getValueType(); MVT RVT = RHS.getValueType(); @@ -119,7 +119,7 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) { unsigned RSize = RVT.getSizeInBits(); // First get the sign bit of second operand. - SDOperand SignBit = DAG.getNode(ISD::SHL, RVT, DAG.getConstant(1, RVT), + SDValue SignBit = DAG.getNode(ISD::SHL, RVT, DAG.getConstant(1, RVT), DAG.getConstant(RSize - 1, TLI.getShiftAmountTy())); SignBit = DAG.getNode(ISD::AND, RVT, RHS, SignBit); @@ -137,7 +137,7 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) { } // Clear the sign bit of the first operand. 
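SoftenFloatRes_FCOPYSIGN, split across this point, assembles copysign out of integer bit operations on the softened values: isolate the sign bit of the second operand (above), clear the sign bit of the first (below), and OR the two together. For the simple case where both operands are f32 carried as i32, the whole operation is:

```cpp
#include <cstdint>

static uint32_t copysignBitsF32(uint32_t XBits, uint32_t YBits) {
  uint32_t Sign    = YBits & 0x80000000u;   // sign bit of the second operand
  uint32_t Cleared = XBits & 0x7FFFFFFFu;   // first operand with its sign cleared
  return Cleared | Sign;
}
```

The handler itself also copes with operands of different widths by shifting the extracted sign bit into the right position before the OR.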
- SDOperand Mask = DAG.getNode(ISD::SHL, LVT, DAG.getConstant(1, LVT), + SDValue Mask = DAG.getNode(ISD::SHL, LVT, DAG.getConstant(1, LVT), DAG.getConstant(LSize - 1, TLI.getShiftAmountTy())); Mask = DAG.getNode(ISD::SUB, LVT, Mask, DAG.getConstant(1, LVT)); @@ -147,9 +147,9 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FCOPYSIGN(SDNode *N) { return DAG.getNode(ISD::OR, LVT, LHS, SignBit); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FDIV(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_FDIV(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)), + SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), GetSoftenedFloat(N->getOperand(1)) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::DIV_F32, @@ -159,9 +159,9 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FDIV(SDNode *N) { NVT, Ops, 2, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)), + SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), GetSoftenedFloat(N->getOperand(1)) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::MUL_F32, @@ -171,25 +171,25 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FMUL(SDNode *N) { NVT, Ops, 2, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_FP_EXTEND(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); RTLIB::Libcall LC = RTLIB::getFPEXT(Op.getValueType(), N->getValueType(0)); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_EXTEND!"); return MakeLibCall(LC, NVT, &Op, 1, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FP_ROUND(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_FP_ROUND(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); RTLIB::Libcall LC = RTLIB::getFPROUND(Op.getValueType(), N->getValueType(0)); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_ROUND!"); return MakeLibCall(LC, NVT, &Op, 1, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)), N->getOperand(1) }; + SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), N->getOperand(1) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::POWI_F32, RTLIB::POWI_F64, @@ -198,9 +198,9 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) { NVT, Ops, 2, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Ops[2] = { GetSoftenedFloat(N->getOperand(0)), + SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), GetSoftenedFloat(N->getOperand(1)) }; return MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::SUB_F32, @@ -210,12 +210,12 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_FSUB(SDNode *N) { NVT, Ops, 2, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) { LoadSDNode *L = cast<LoadSDNode>(N); MVT VT = N->getValueType(0); MVT NVT = TLI.getTypeToTransformTo(VT); - 
SDOperand NewL; + SDValue NewL; if (L->getExtensionType() == ISD::NON_EXTLOAD) { NewL = DAG.getLoad(L->getAddressingMode(), L->getExtensionType(), NVT, L->getChain(), L->getBasePtr(), L->getOffset(), @@ -223,7 +223,7 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) { L->isVolatile(), L->getAlignment()); // Legalized the chain result - switch anything that used the old chain to // use the new one. - ReplaceValueWith(SDOperand(N, 1), NewL.getValue(1)); + ReplaceValueWith(SDValue(N, 1), NewL.getValue(1)); return NewL; } @@ -236,33 +236,33 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N) { L->isVolatile(), L->getAlignment()); // Legalized the chain result - switch anything that used the old chain to // use the new one. - ReplaceValueWith(SDOperand(N, 1), NewL.getValue(1)); + ReplaceValueWith(SDValue(N, 1), NewL.getValue(1)); return BitConvertToInteger(DAG.getNode(ISD::FP_EXTEND, VT, NewL)); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_SELECT(SDNode *N) { - SDOperand LHS = GetSoftenedFloat(N->getOperand(1)); - SDOperand RHS = GetSoftenedFloat(N->getOperand(2)); +SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT(SDNode *N) { + SDValue LHS = GetSoftenedFloat(N->getOperand(1)); + SDValue RHS = GetSoftenedFloat(N->getOperand(2)); return DAG.getNode(ISD::SELECT, LHS.getValueType(), N->getOperand(0),LHS,RHS); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_SELECT_CC(SDNode *N) { - SDOperand LHS = GetSoftenedFloat(N->getOperand(2)); - SDOperand RHS = GetSoftenedFloat(N->getOperand(3)); +SDValue DAGTypeLegalizer::SoftenFloatRes_SELECT_CC(SDNode *N) { + SDValue LHS = GetSoftenedFloat(N->getOperand(2)); + SDValue RHS = GetSoftenedFloat(N->getOperand(3)); return DAG.getNode(ISD::SELECT_CC, LHS.getValueType(), N->getOperand(0), N->getOperand(1), LHS, RHS, N->getOperand(4)); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_SINT_TO_FP(SDNode *N) { - SDOperand Op = N->getOperand(0); +SDValue DAGTypeLegalizer::SoftenFloatRes_SINT_TO_FP(SDNode *N) { + SDValue Op = N->getOperand(0); MVT RVT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::getSINTTOFP(Op.getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SINT_TO_FP!"); return MakeLibCall(LC, TLI.getTypeToTransformTo(RVT), &Op, 1, false); } -SDOperand DAGTypeLegalizer::SoftenFloatRes_UINT_TO_FP(SDNode *N) { - SDOperand Op = N->getOperand(0); +SDValue DAGTypeLegalizer::SoftenFloatRes_UINT_TO_FP(SDNode *N) { + SDValue Op = N->getOperand(0); MVT RVT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::getUINTTOFP(Op.getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported UINT_TO_FP!"); @@ -277,7 +277,7 @@ SDOperand DAGTypeLegalizer::SoftenFloatRes_UINT_TO_FP(SDNode *N) { bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) { DEBUG(cerr << "Soften float operand " << OpNo << ": "; N->dump(&DAG); cerr << "\n"); - SDOperand Res = SDOperand(); + SDValue Res = SDValue(); switch (N->getOpcode()) { default: @@ -313,16 +313,16 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) { assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && "Invalid operand expansion"); - ReplaceValueWith(SDOperand(N, 0), Res); + ReplaceValueWith(SDValue(N, 0), Res); return false; } /// SoftenSetCCOperands - Soften the operands of a comparison. This code is /// shared among BR_CC, SELECT_CC, and SETCC handlers. 
-void DAGTypeLegalizer::SoftenSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS, +void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode) { - SDOperand LHSInt = GetSoftenedFloat(NewLHS); - SDOperand RHSInt = GetSoftenedFloat(NewRHS); + SDValue LHSInt = GetSoftenedFloat(NewLHS); + SDValue RHSInt = GetSoftenedFloat(NewRHS); MVT VT = NewLHS.getValueType(); assert((VT == MVT::f32 || VT == MVT::f64) && "Unsupported setcc type!"); @@ -387,28 +387,28 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS, } MVT RetVT = MVT::i32; // FIXME: is this the correct return type? - SDOperand Ops[2] = { LHSInt, RHSInt }; + SDValue Ops[2] = { LHSInt, RHSInt }; NewLHS = MakeLibCall(LC1, RetVT, Ops, 2, false/*sign irrelevant*/); NewRHS = DAG.getConstant(0, RetVT); CCCode = TLI.getCmpLibcallCC(LC1); if (LC2 != RTLIB::UNKNOWN_LIBCALL) { - SDOperand Tmp = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(NewLHS), + SDValue Tmp = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(NewLHS), NewLHS, NewRHS, DAG.getCondCode(CCCode)); NewLHS = MakeLibCall(LC2, RetVT, Ops, 2, false/*sign irrelevant*/); NewLHS = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(NewLHS), NewLHS, NewRHS, DAG.getCondCode(TLI.getCmpLibcallCC(LC2))); NewLHS = DAG.getNode(ISD::OR, Tmp.getValueType(), Tmp, NewLHS); - NewRHS = SDOperand(); + NewRHS = SDValue(); } } -SDOperand DAGTypeLegalizer::SoftenFloatOp_BIT_CONVERT(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatOp_BIT_CONVERT(SDNode *N) { return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), GetSoftenedFloat(N->getOperand(0))); } -SDOperand DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) { - SDOperand NewLHS = N->getOperand(2), NewRHS = N->getOperand(3); +SDValue DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) { + SDValue NewLHS = N->getOperand(2), NewRHS = N->getOperand(3); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get(); SoftenSetCCOperands(NewLHS, NewRHS, CCCode); @@ -420,29 +420,29 @@ SDOperand DAGTypeLegalizer::SoftenFloatOp_BR_CC(SDNode *N) { } // Update N to have the operands specified. 
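SoftenSetCCOperands above turns a floating-point setcc into a comparison libcall whose integer result is tested against zero with the condition code registered for that libcall; predicates that need two runtime tests make a second call and OR the results. A host-side model of the single-call case, with a local stand-in for the RTLIB routine (e.g. __eqsf2, which returns 0 when its operands compare equal):

```cpp
// Stand-in for the f32 "equal" comparison libcall.
static int cmpLibcallEqF32(float A, float B) {
  return (A == B) ? 0 : 1;
}

// SETOEQ after softening: NewLHS = libcall result, NewRHS = 0, CC = SETEQ.
static bool softenedSetOEQ(float A, float B) {
  return cmpLibcallEqF32(A, B) == 0;
}
```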
- return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), + return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), DAG.getCondCode(CCCode), NewLHS, NewRHS, N->getOperand(4)); } -SDOperand DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_SINT(SDNode *N) { MVT RVT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::getFPTOSINT(N->getOperand(0).getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_SINT!"); - SDOperand Op = GetSoftenedFloat(N->getOperand(0)); + SDValue Op = GetSoftenedFloat(N->getOperand(0)); return MakeLibCall(LC, RVT, &Op, 1, false); } -SDOperand DAGTypeLegalizer::SoftenFloatOp_FP_TO_UINT(SDNode *N) { +SDValue DAGTypeLegalizer::SoftenFloatOp_FP_TO_UINT(SDNode *N) { MVT RVT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::getFPTOUINT(N->getOperand(0).getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_UINT!"); - SDOperand Op = GetSoftenedFloat(N->getOperand(0)); + SDValue Op = GetSoftenedFloat(N->getOperand(0)); return MakeLibCall(LC, RVT, &Op, 1, false); } -SDOperand DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) { - SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); +SDValue DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) { + SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get(); SoftenSetCCOperands(NewLHS, NewRHS, CCCode); @@ -454,13 +454,13 @@ SDOperand DAGTypeLegalizer::SoftenFloatOp_SELECT_CC(SDNode *N) { } // Update N to have the operands specified. - return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS, + return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS, N->getOperand(2), N->getOperand(3), DAG.getCondCode(CCCode)); } -SDOperand DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) { - SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); +SDValue DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) { + SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get(); SoftenSetCCOperands(NewLHS, NewRHS, CCCode); @@ -472,15 +472,15 @@ SDOperand DAGTypeLegalizer::SoftenFloatOp_SETCC(SDNode *N) { } // Otherwise, update N to have the operands specified. - return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS, + return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS, DAG.getCondCode(CCCode)); } -SDOperand DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) { assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!"); assert(OpNo == 1 && "Can only soften the stored value!"); StoreSDNode *ST = cast<StoreSDNode>(N); - SDOperand Val = ST->getValue(); + SDValue Val = ST->getValue(); if (ST->isTruncatingStore()) // Do an FP_ROUND followed by a non-truncating store. @@ -505,8 +505,8 @@ SDOperand DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) { /// know that (at least) one result needs expansion. void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) { DEBUG(cerr << "Expand float result: "; N->dump(&DAG); cerr << "\n"); - SDOperand Lo, Hi; - Lo = Hi = SDOperand(); + SDValue Lo, Hi; + Lo = Hi = SDValue(); // See if the target wants to custom expand this node. 
if (TLI.getOperationAction(N->getOpcode(), N->getValueType(ResNo)) == @@ -554,11 +554,11 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) { // If Lo/Hi is null, the sub-method took care of registering results etc. if (Lo.Val) - SetExpandedFloat(SDOperand(N, ResNo), Lo, Hi); + SetExpandedFloat(SDValue(N, ResNo), Lo, Hi); } -void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo, + SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); assert(NVT.getSizeInBits() == integerPartWidth && "Do not know how to expand this float constant!"); @@ -569,10 +569,10 @@ void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDOperand &Lo, &C.getRawData()[0])), NVT); } -void DAGTypeLegalizer::ExpandFloatRes_FADD(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; - SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0), +void DAGTypeLegalizer::ExpandFloatRes_FADD(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Call = MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::ADD_F32, RTLIB::ADD_F64, RTLIB::ADD_F80, @@ -583,11 +583,11 @@ void DAGTypeLegalizer::ExpandFloatRes_FADD(SDNode *N, SDOperand &Lo, Lo = Call.getOperand(0); Hi = Call.getOperand(1); } -void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDValue &Lo, + SDValue &Hi) { assert(N->getValueType(0) == MVT::ppcf128 && "Logic only correct for ppcf128!"); - SDOperand Tmp; + SDValue Tmp; GetExpandedFloat(N->getOperand(0), Lo, Tmp); Hi = DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp); // Lo = Hi==fabs(Hi) ? 
Lo : -Lo; @@ -596,10 +596,10 @@ void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDOperand &Lo, DAG.getCondCode(ISD::SETEQ)); } -void DAGTypeLegalizer::ExpandFloatRes_FDIV(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; - SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0), +void DAGTypeLegalizer::ExpandFloatRes_FDIV(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Call = MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::DIV_F32, RTLIB::DIV_F64, RTLIB::DIV_F80, @@ -610,10 +610,10 @@ void DAGTypeLegalizer::ExpandFloatRes_FDIV(SDNode *N, SDOperand &Lo, Lo = Call.getOperand(0); Hi = Call.getOperand(1); } -void DAGTypeLegalizer::ExpandFloatRes_FMUL(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; - SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0), +void DAGTypeLegalizer::ExpandFloatRes_FMUL(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Call = MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::MUL_F32, RTLIB::MUL_F64, RTLIB::MUL_F80, @@ -624,24 +624,24 @@ void DAGTypeLegalizer::ExpandFloatRes_FMUL(SDNode *N, SDOperand &Lo, Lo = Call.getOperand(0); Hi = Call.getOperand(1); } -void DAGTypeLegalizer::ExpandFloatRes_FNEG(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandFloatRes_FNEG(SDNode *N, SDValue &Lo, + SDValue &Hi) { GetExpandedFloat(N->getOperand(0), Lo, Hi); Lo = DAG.getNode(ISD::FNEG, Lo.getValueType(), Lo); Hi = DAG.getNode(ISD::FNEG, Hi.getValueType(), Hi); } -void DAGTypeLegalizer::ExpandFloatRes_FP_EXTEND(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandFloatRes_FP_EXTEND(SDNode *N, SDValue &Lo, + SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); Hi = DAG.getNode(ISD::FP_EXTEND, NVT, N->getOperand(0)); Lo = DAG.getConstantFP(APFloat(APInt(NVT.getSizeInBits(), 0)), NVT); } -void DAGTypeLegalizer::ExpandFloatRes_FSUB(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; - SDOperand Call = MakeLibCall(GetFPLibCall(N->getValueType(0), +void DAGTypeLegalizer::ExpandFloatRes_FSUB(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Call = MakeLibCall(GetFPLibCall(N->getValueType(0), RTLIB::SUB_F32, RTLIB::SUB_F64, RTLIB::SUB_F80, @@ -652,8 +652,8 @@ void DAGTypeLegalizer::ExpandFloatRes_FSUB(SDNode *N, SDOperand &Lo, Lo = Call.getOperand(0); Hi = Call.getOperand(1); } -void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo, + SDValue &Hi) { if (ISD::isNormalLoad(N)) { ExpandRes_NormalLoad(N, Lo, Hi); return; @@ -661,8 +661,8 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDOperand &Lo, assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!"); LoadSDNode *LD = cast<LoadSDNode>(N); - SDOperand Chain = LD->getChain(); - SDOperand Ptr = LD->getBasePtr(); + SDValue Chain = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); MVT NVT = TLI.getTypeToTransformTo(LD->getValueType(0)); assert(NVT.isByteSized() && "Expanded type not byte sized!"); @@ -681,15 +681,15 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDOperand &Lo, // Modified the chain - switch anything that used the old chain to use the // new one. 
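The ExpandFloatRes_* routines above split a ppcf128 value into two f64s, with Hi holding the leading part and Lo a correction term; FNEG negates both halves, FABS takes fabs of Hi and flips Lo only when Hi changed sign, and FP_EXTEND from a smaller type puts the value in Hi with a zero Lo. A sketch of those three, assuming a plain struct for the pair:

```cpp
#include <cmath>

struct PPCF128 { double Lo, Hi; };   // expanded ppcf128: value is Hi + Lo

static PPCF128 fneg128(PPCF128 V) { return { -V.Lo, -V.Hi }; }

static PPCF128 fabs128(PPCF128 V) {
  double Hi = std::fabs(V.Hi);
  return { V.Hi == Hi ? V.Lo : -V.Lo, Hi };   // "Lo = Hi==fabs(Hi) ? Lo : -Lo"
}

static PPCF128 extend128(double D) { return { 0.0, D }; }   // FP_EXTEND
```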
- ReplaceValueWith(SDOperand(LD, 1), Chain); + ReplaceValueWith(SDValue(LD, 1), Chain); } -void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo, + SDValue &Hi) { assert(N->getValueType(0) == MVT::ppcf128 && "Unsupported XINT_TO_FP!"); MVT VT = N->getValueType(0); MVT NVT = TLI.getTypeToTransformTo(VT); - SDOperand Src = N->getOperand(0); + SDValue Src = N->getOperand(0); MVT SrcVT = Src.getValueType(); // First do an SINT_TO_FP, whether the original was signed or unsigned. @@ -760,11 +760,11 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDOperand &Lo, /// need promotion or expansion as well as the specified one. bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) { DEBUG(cerr << "Expand float operand: "; N->dump(&DAG); cerr << "\n"); - SDOperand Res = SDOperand(); + SDValue Res = SDValue(); if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType()) == TargetLowering::Custom) - Res = TLI.LowerOperation(SDOperand(N, OpNo), DAG); + Res = TLI.LowerOperation(SDValue(N, OpNo), DAG); if (Res.Val == 0) { switch (N->getOpcode()) { @@ -806,16 +806,16 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) { assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && "Invalid operand expansion"); - ReplaceValueWith(SDOperand(N, 0), Res); + ReplaceValueWith(SDValue(N, 0), Res); return false; } /// FloatExpandSetCCOperands - Expand the operands of a comparison. This code /// is shared among BR_CC, SELECT_CC, and SETCC handlers. -void DAGTypeLegalizer::FloatExpandSetCCOperands(SDOperand &NewLHS, - SDOperand &NewRHS, +void DAGTypeLegalizer::FloatExpandSetCCOperands(SDValue &NewLHS, + SDValue &NewRHS, ISD::CondCode &CCCode) { - SDOperand LHSLo, LHSHi, RHSLo, RHSHi; + SDValue LHSLo, LHSHi, RHSLo, RHSHi; GetExpandedFloat(NewLHS, LHSLo, LHSHi); GetExpandedFloat(NewRHS, RHSLo, RHSHi); @@ -827,7 +827,7 @@ void DAGTypeLegalizer::FloatExpandSetCCOperands(SDOperand &NewLHS, // BNE crN, L: // FCMP crN, lo1, lo2 // The following can be improved, but not that much. - SDOperand Tmp1, Tmp2, Tmp3; + SDValue Tmp1, Tmp2, Tmp3; Tmp1 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, ISD::SETEQ); Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, CCCode); Tmp3 = DAG.getNode(ISD::AND, Tmp1.getValueType(), Tmp1, Tmp2); @@ -835,11 +835,11 @@ void DAGTypeLegalizer::FloatExpandSetCCOperands(SDOperand &NewLHS, Tmp2 = DAG.getSetCC(TLI.getSetCCResultType(LHSHi), LHSHi, RHSHi, CCCode); Tmp1 = DAG.getNode(ISD::AND, Tmp1.getValueType(), Tmp1, Tmp2); NewLHS = DAG.getNode(ISD::OR, Tmp1.getValueType(), Tmp1, Tmp3); - NewRHS = SDOperand(); // LHS is the result, not a compare. + NewRHS = SDValue(); // LHS is the result, not a compare. } -SDOperand DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) { - SDOperand NewLHS = N->getOperand(2), NewRHS = N->getOperand(3); +SDValue DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) { + SDValue NewLHS = N->getOperand(2), NewRHS = N->getOperand(3); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get(); FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode); @@ -851,36 +851,36 @@ SDOperand DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) { } // Update N to have the operands specified. 
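FloatExpandSetCCOperands above lowers a comparison of two expanded values along the lines of its own comment: compare the Hi parts first, and only let the Lo parts decide when the Hi parts are equal. As a boolean expression over the four halves (cmp stands for whatever condition code the original setcc used):

```cpp
template <typename Cmp>
static bool expandedSetCC(double LHSLo, double LHSHi,
                          double RHSLo, double RHSHi, Cmp cmp) {
  bool HiEq = LHSHi == RHSHi;
  return (HiEq && cmp(LHSLo, RHSLo)) || (!HiEq && cmp(LHSHi, RHSHi));
}
```

For example, a SETLT would be modelled as expandedSetCC(a.Lo, a.Hi, b.Lo, b.Hi, [](double x, double y) { return x < y; }).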
- return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), + return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), DAG.getCondCode(CCCode), NewLHS, NewRHS, N->getOperand(4)); } -SDOperand DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) { +SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) { assert(N->getOperand(0).getValueType() == MVT::ppcf128 && "Logic only correct for ppcf128!"); - SDOperand Lo, Hi; + SDValue Lo, Hi; GetExpandedFloat(N->getOperand(0), Lo, Hi); // Round it the rest of the way (e.g. to f32) if needed. return DAG.getNode(ISD::FP_ROUND, N->getValueType(0), Hi, N->getOperand(1)); } -SDOperand DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) { +SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) { MVT RVT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::getFPTOSINT(N->getOperand(0).getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_SINT!"); return MakeLibCall(LC, RVT, &N->getOperand(0), 1, false); } -SDOperand DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) { +SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) { MVT RVT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::getFPTOUINT(N->getOperand(0).getValueType(), RVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported FP_TO_UINT!"); return MakeLibCall(LC, N->getValueType(0), &N->getOperand(0), 1, false); } -SDOperand DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) { - SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); +SDValue DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) { + SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get(); FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode); @@ -892,13 +892,13 @@ SDOperand DAGTypeLegalizer::ExpandFloatOp_SELECT_CC(SDNode *N) { } // Update N to have the operands specified. - return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS, + return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS, N->getOperand(2), N->getOperand(3), DAG.getCondCode(CCCode)); } -SDOperand DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) { - SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); +SDValue DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) { + SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get(); FloatExpandSetCCOperands(NewLHS, NewRHS, CCCode); @@ -910,11 +910,11 @@ SDOperand DAGTypeLegalizer::ExpandFloatOp_SETCC(SDNode *N) { } // Otherwise, update N to have the operands specified. 
- return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS, + return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS, DAG.getCondCode(CCCode)); } -SDOperand DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) { if (ISD::isNormalStore(N)) return ExpandOp_NormalStore(N, OpNo); @@ -922,14 +922,14 @@ SDOperand DAGTypeLegalizer::ExpandFloatOp_STORE(SDNode *N, unsigned OpNo) { assert(OpNo == 1 && "Can only expand the stored value so far"); StoreSDNode *ST = cast<StoreSDNode>(N); - SDOperand Chain = ST->getChain(); - SDOperand Ptr = ST->getBasePtr(); + SDValue Chain = ST->getChain(); + SDValue Ptr = ST->getBasePtr(); MVT NVT = TLI.getTypeToTransformTo(ST->getValue().getValueType()); assert(NVT.isByteSized() && "Expanded type not byte sized!"); assert(ST->getMemoryVT().bitsLE(NVT) && "Float type not round?"); - SDOperand Lo, Hi; + SDValue Lo, Hi; GetExpandedOp(ST->getValue(), Lo, Hi); return DAG.getTruncStore(Chain, Lo, Ptr, diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index 8a3086190d..1dc933cc7e 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -31,7 +31,7 @@ using namespace llvm; /// expansion, we just know that (at least) one result needs promotion. void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) { DEBUG(cerr << "Promote integer result: "; N->dump(&DAG); cerr << "\n"); - SDOperand Result = SDOperand(); + SDValue Result = SDValue(); // See if the target wants to custom expand this node. if (TLI.getOperationAction(N->getOpcode(), N->getValueType(ResNo)) == @@ -100,28 +100,28 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) { // If Result is null, the sub-method took care of registering the result. if (Result.Val) - SetPromotedInteger(SDOperand(N, ResNo), Result); + SetPromotedInteger(SDValue(N, ResNo), Result); } -SDOperand DAGTypeLegalizer::PromoteIntRes_AssertSext(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_AssertSext(SDNode *N) { // Sign-extend the new bits, and continue the assertion. MVT OldVT = N->getValueType(0); - SDOperand Op = GetPromotedInteger(N->getOperand(0)); + SDValue Op = GetPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::AssertSext, Op.getValueType(), DAG.getNode(ISD::SIGN_EXTEND_INREG, Op.getValueType(), Op, DAG.getValueType(OldVT)), N->getOperand(1)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_AssertZext(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_AssertZext(SDNode *N) { // Zero the new bits, and continue the assertion. MVT OldVT = N->getValueType(0); - SDOperand Op = GetPromotedInteger(N->getOperand(0)); + SDValue Op = GetPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::AssertZext, Op.getValueType(), DAG.getZeroExtendInReg(Op, OldVT), N->getOperand(1)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) { - SDOperand InOp = N->getOperand(0); +SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) { + SDValue InOp = N->getOperand(0); MVT InVT = InOp.getValueType(); MVT NInVT = TLI.getTypeToTransformTo(InVT); MVT OutVT = TLI.getTypeToTransformTo(N->getValueType(0)); @@ -150,7 +150,7 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) { case SplitVector: // For example, i32 = BIT_CONVERT v2i16 on alpha. Convert the split // pieces of the input into integers and reassemble in the final type. 
- SDOperand Lo, Hi; + SDValue Lo, Hi; GetSplitVector(N->getOperand(0), Lo, Hi); Lo = BitConvertToInteger(Lo); Hi = BitConvertToInteger(Hi); @@ -166,12 +166,12 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) { // Otherwise, lower the bit-convert to a store/load from the stack, then // promote the load. - SDOperand Op = CreateStackStoreLoad(InOp, N->getValueType(0)); + SDValue Op = CreateStackStoreLoad(InOp, N->getValueType(0)); return PromoteIntRes_LOAD(cast<LoadSDNode>(Op.Val)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); MVT OVT = N->getValueType(0); MVT NVT = Op.getValueType(); @@ -180,7 +180,7 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_BSWAP(SDNode *N) { DAG.getConstant(DiffBits, TLI.getShiftAmountTy())); } -SDOperand DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) { // The pair element type may be legal, or may not promote to the same type as // the result, for example i14 = BUILD_PAIR (i7, i7). Handle all cases. return DAG.getNode(ISD::ANY_EXTEND, @@ -188,19 +188,19 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_BUILD_PAIR(SDNode *N) { JoinIntegers(N->getOperand(0), N->getOperand(1))); } -SDOperand DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) { MVT VT = N->getValueType(0); // Zero extend things like i1, sign extend everything else. It shouldn't // matter in theory which one we pick, but this tends to give better code? unsigned Opc = VT.isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; - SDOperand Result = DAG.getNode(Opc, TLI.getTypeToTransformTo(VT), - SDOperand(N, 0)); + SDValue Result = DAG.getNode(Opc, TLI.getTypeToTransformTo(VT), + SDValue(N, 0)); assert(isa<ConstantSDNode>(Result) && "Didn't constant fold ext?"); return Result; } -SDOperand DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); MVT OVT = N->getValueType(0); MVT NVT = Op.getValueType(); // Zero extend to the promoted type and do the count there. @@ -211,16 +211,16 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) { OVT.getSizeInBits(), NVT)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_CTPOP(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntRes_CTPOP(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); MVT OVT = N->getValueType(0); MVT NVT = Op.getValueType(); // Zero extend to the promoted type and do the count there. 
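The PromoteIntRes_* handlers above widen an illegal integer type (say i16) to a legal one (i32) and then compensate: BSWAP in the wide type leaves the interesting bytes in the high half, so the result is shifted right by the size difference; CTLZ counts in the wide type and subtracts the extra leading zeros introduced by zero extension; CTPOP simply counts the zero-extended value. A sketch for the i16-to-i32 case (builtins stand in for the wide-type nodes):

```cpp
#include <cstdint>

static uint16_t bswap16via32(uint16_t X) {
  return (uint16_t)(__builtin_bswap32((uint32_t)X) >> 16);   // shift by DiffBits
}

static unsigned ctlz16via32(uint16_t X) {
  uint32_t Wide = X;                                         // zero extend
  unsigned LZ = Wide ? __builtin_clz(Wide) : 32;
  return LZ - (32 - 16);                                     // drop the extra zeros
}

static unsigned ctpop16via32(uint16_t X) {
  return __builtin_popcount((uint32_t)X);                    // extension adds no ones
}
```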
return DAG.getNode(ISD::CTPOP, NVT, DAG.getZeroExtendInReg(Op, OVT)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); MVT OVT = N->getValueType(0); MVT NVT = Op.getValueType(); // The count is the same in the promoted type except if the original @@ -232,9 +232,9 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_CTTZ(SDNode *N) { return DAG.getNode(ISD::CTTZ, NVT, Op); } -SDOperand DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) { MVT OldVT = N->getValueType(0); - SDOperand OldVec = N->getOperand(0); + SDValue OldVec = N->getOperand(0); unsigned OldElts = OldVec.getValueType().getVectorNumElements(); if (OldElts == 1) { @@ -253,31 +253,31 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N) { MVT NewVT = MVT::getIntegerVT(2 * OldVT.getSizeInBits()); assert(OldVT.isSimple() && NewVT.isSimple()); - SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT, + SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, MVT::getVectorVT(NewVT, OldElts / 2), OldVec); // Extract the element at OldIdx / 2 from the new vector. - SDOperand OldIdx = N->getOperand(1); - SDOperand NewIdx = DAG.getNode(ISD::SRL, OldIdx.getValueType(), OldIdx, + SDValue OldIdx = N->getOperand(1); + SDValue NewIdx = DAG.getNode(ISD::SRL, OldIdx.getValueType(), OldIdx, DAG.getConstant(1, TLI.getShiftAmountTy())); - SDOperand Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, NewIdx); + SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, NewVT, NewVec, NewIdx); // Select the appropriate half of the element: Lo if OldIdx was even, // Hi if it was odd. 
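The promotions above reduce to word-level identities: zero-extend, count in the wider type, then correct for the extra bits (CTLZ subtracts them, CTTZ plants a stopper bit just above the original width, CTPOP needs no fixup). A standalone sketch using compiler builtins, assuming an i16 value promoted to i32 (the helper names are illustrative):

#include <cstdint>

// Leading zeros: the 16 extra high bits are always zero, so subtract them.
unsigned ctlz16(uint16_t x) { return __builtin_clz((uint32_t)x) - 16; }   // undefined for x == 0, like ISD::CTLZ

// Trailing zeros: set bit 16 so a zero input stops the count at 16.
unsigned cttz16(uint16_t x) { return __builtin_ctz((uint32_t)x | 0x10000u); }

// Population count: zero extension adds no set bits.
unsigned ctpop16(uint16_t x) { return __builtin_popcount((uint32_t)x); }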
- SDOperand Lo = Elt; - SDOperand Hi = DAG.getNode(ISD::SRL, NewVT, Elt, + SDValue Lo = Elt; + SDValue Hi = DAG.getNode(ISD::SRL, NewVT, Elt, DAG.getConstant(OldVT.getSizeInBits(), TLI.getShiftAmountTy())); if (TLI.isBigEndian()) std::swap(Lo, Hi); - SDOperand Odd = DAG.getNode(ISD::AND, OldIdx.getValueType(), OldIdx, + SDValue Odd = DAG.getNode(ISD::AND, OldIdx.getValueType(), OldIdx, DAG.getConstant(1, TLI.getShiftAmountTy())); return DAG.getNode(ISD::SELECT, NewVT, Odd, Hi, Lo); } -SDOperand DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) { unsigned NewOpc = N->getOpcode(); MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); @@ -296,11 +296,11 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_FP_TO_XINT(SDNode *N) { return DAG.getNode(NewOpc, NVT, N->getOperand(0)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); if (getTypeAction(N->getOperand(0).getValueType()) == PromoteInteger) { - SDOperand Res = GetPromotedInteger(N->getOperand(0)); + SDValue Res = GetPromotedInteger(N->getOperand(0)); assert(Res.getValueType().getSizeInBits() <= NVT.getSizeInBits() && "Extension doesn't make sense!"); @@ -322,26 +322,26 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_INT_EXTEND(SDNode *N) { return DAG.getNode(N->getOpcode(), NVT, N->getOperand(0)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_LOAD(LoadSDNode *N) { assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!"); MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(N) ? ISD::EXTLOAD : N->getExtensionType(); - SDOperand Res = DAG.getExtLoad(ExtType, NVT, N->getChain(), N->getBasePtr(), + SDValue Res = DAG.getExtLoad(ExtType, NVT, N->getChain(), N->getBasePtr(), N->getSrcValue(), N->getSrcValueOffset(), N->getMemoryVT(), N->isVolatile(), N->getAlignment()); // Legalized the chain result - switch anything that used the old chain to // use the new one. - ReplaceValueWith(SDOperand(N, 1), Res.getValue(1)); + ReplaceValueWith(SDValue(N, 1), Res.getValue(1)); return Res; } -SDOperand DAGTypeLegalizer::PromoteIntRes_SDIV(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_SDIV(SDNode *N) { // Sign extend the input. 
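PromoteIntRes_EXTRACT_VECTOR_ELT handles an illegal element type by viewing the vector as having half as many elements of twice the width: halve the index, extract the wide element, then pick its low or high half from the parity of the original index. A little-endian sketch on plain arrays (names and fixed widths are illustrative):

#include <cstdint>

// A v4i16 reinterpreted as two i32 elements; extract 16-bit element Idx.
uint16_t extract_i16(const uint32_t packed[2], unsigned Idx) {
  uint32_t Elt = packed[Idx >> 1];          // EXTRACT_VECTOR_ELT at Idx / 2
  uint32_t Hi  = Elt >> 16;                 // high half of the paired element
  uint32_t Lo  = Elt;                       // low half
  return (uint16_t)((Idx & 1) ? Hi : Lo);   // odd index selects the high half
}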
- SDOperand LHS = GetPromotedInteger(N->getOperand(0)); - SDOperand RHS = GetPromotedInteger(N->getOperand(1)); + SDValue LHS = GetPromotedInteger(N->getOperand(0)); + SDValue RHS = GetPromotedInteger(N->getOperand(1)); MVT VT = N->getValueType(0); LHS = DAG.getNode(ISD::SIGN_EXTEND_INREG, LHS.getValueType(), LHS, DAG.getValueType(VT)); @@ -351,65 +351,65 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_SDIV(SDNode *N) { return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SELECT(SDNode *N) { - SDOperand LHS = GetPromotedInteger(N->getOperand(1)); - SDOperand RHS = GetPromotedInteger(N->getOperand(2)); +SDValue DAGTypeLegalizer::PromoteIntRes_SELECT(SDNode *N) { + SDValue LHS = GetPromotedInteger(N->getOperand(1)); + SDValue RHS = GetPromotedInteger(N->getOperand(2)); return DAG.getNode(ISD::SELECT, LHS.getValueType(), N->getOperand(0),LHS,RHS); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SELECT_CC(SDNode *N) { - SDOperand LHS = GetPromotedInteger(N->getOperand(2)); - SDOperand RHS = GetPromotedInteger(N->getOperand(3)); +SDValue DAGTypeLegalizer::PromoteIntRes_SELECT_CC(SDNode *N) { + SDValue LHS = GetPromotedInteger(N->getOperand(2)); + SDValue RHS = GetPromotedInteger(N->getOperand(3)); return DAG.getNode(ISD::SELECT_CC, LHS.getValueType(), N->getOperand(0), N->getOperand(1), LHS, RHS, N->getOperand(4)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_SETCC(SDNode *N) { assert(isTypeLegal(TLI.getSetCCResultType(N->getOperand(0))) && "SetCC type is not legal??"); return DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(N->getOperand(0)), N->getOperand(0), N->getOperand(1), N->getOperand(2)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_SHL(SDNode *N) { return DAG.getNode(ISD::SHL, TLI.getTypeToTransformTo(N->getValueType(0)), GetPromotedInteger(N->getOperand(0)), N->getOperand(1)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::SIGN_EXTEND_INREG, Op.getValueType(), Op, N->getOperand(1)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_SimpleIntBinOp(SDNode *N) { // The input may have strange things in the top bits of the registers, but // these operations don't care. They may have weird bits going out, but // that too is okay if they are integer operations. - SDOperand LHS = GetPromotedInteger(N->getOperand(0)); - SDOperand RHS = GetPromotedInteger(N->getOperand(1)); + SDValue LHS = GetPromotedInteger(N->getOperand(0)); + SDValue RHS = GetPromotedInteger(N->getOperand(1)); return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_SRA(SDNode *N) { // The input value must be properly sign extended. 
MVT VT = N->getValueType(0); MVT NVT = TLI.getTypeToTransformTo(VT); - SDOperand Res = GetPromotedInteger(N->getOperand(0)); + SDValue Res = GetPromotedInteger(N->getOperand(0)); Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, NVT, Res, DAG.getValueType(VT)); return DAG.getNode(ISD::SRA, NVT, Res, N->getOperand(1)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_SRL(SDNode *N) { // The input value must be properly zero extended. MVT VT = N->getValueType(0); MVT NVT = TLI.getTypeToTransformTo(VT); - SDOperand Res = ZExtPromotedInteger(N->getOperand(0)); + SDValue Res = ZExtPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::SRL, NVT, Res, N->getOperand(1)); } -SDOperand DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) { - SDOperand Res; +SDValue DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) { + SDValue Res; switch (getTypeAction(N->getOperand(0).getValueType())) { default: assert(0 && "Unknown type action!"); @@ -432,10 +432,10 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_TRUNCATE(SDNode *N) { return DAG.getNode(ISD::TRUNCATE, NVT, Res); } -SDOperand DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) { // Zero extend the input. - SDOperand LHS = GetPromotedInteger(N->getOperand(0)); - SDOperand RHS = GetPromotedInteger(N->getOperand(1)); + SDValue LHS = GetPromotedInteger(N->getOperand(0)); + SDValue RHS = GetPromotedInteger(N->getOperand(1)); MVT VT = N->getValueType(0); LHS = DAG.getZeroExtendInReg(LHS, VT); RHS = DAG.getZeroExtendInReg(RHS, VT); @@ -443,17 +443,17 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_UDIV(SDNode *N) { return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS); } -SDOperand DAGTypeLegalizer::PromoteIntRes_UNDEF(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntRes_UNDEF(SDNode *N) { return DAG.getNode(ISD::UNDEF, TLI.getTypeToTransformTo(N->getValueType(0))); } -SDOperand DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) { - SDOperand Chain = N->getOperand(0); // Get the chain. - SDOperand Ptr = N->getOperand(1); // Get the pointer. +SDValue DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) { + SDValue Chain = N->getOperand(0); // Get the chain. + SDValue Ptr = N->getOperand(1); // Get the pointer. MVT VT = N->getValueType(0); const Value *V = cast<SrcValueSDNode>(N->getOperand(2))->getValue(); - SDOperand VAList = DAG.getLoad(TLI.getPointerTy(), Chain, Ptr, V, 0); + SDValue VAList = DAG.getLoad(TLI.getPointerTy(), Chain, Ptr, V, 0); // Increment the arg pointer, VAList, to the next vaarg // FIXME: should the ABI size be used for the increment? Think of @@ -461,7 +461,7 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) { // integers of unusual size (such MVT::i1, which gives an increment // of zero here!). unsigned Increment = VT.getSizeInBits() / 8; - SDOperand Tmp = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, + SDValue Tmp = DAG.getNode(ISD::ADD, TLI.getPointerTy(), VAList, DAG.getConstant(Increment, TLI.getPointerTy())); // Store the incremented VAList to the pointer. @@ -473,7 +473,7 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) { // Legalized the chain result - switch anything that used the old chain to // use the new one. - ReplaceValueWith(SDOperand(N, 1), Tmp.getValue(1)); + ReplaceValueWith(SDValue(N, 1), Tmp.getValue(1)); return Tmp; } @@ -488,11 +488,11 @@ SDOperand DAGTypeLegalizer::PromoteIntRes_VAARG(SDNode *N) { /// node may need promotion or expansion as well as the specified one. 
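Divisions and right shifts are the promoted operations that do care about the bits above the original width, so the handlers above first sign-extend in register (SDIV, SRA) or zero-extend in register (UDIV, SRL). The same identities on plain integers, assuming an i8 value carried in a 32-bit register and arithmetic >> on signed values:

#include <cstdint>

int32_t  sext8(int32_t reg)  { return (int32_t)(int8_t)reg; }    // SIGN_EXTEND_INREG to i8
uint32_t zext8(uint32_t reg) { return reg & 0xFFu; }              // zero extend in reg to i8

int32_t  sdiv8(int32_t a, int32_t b)   { return sext8(a) / sext8(b); } // low 8 bits = i8 quotient
int32_t  sra8 (int32_t a, unsigned s)  { return sext8(a) >> s; }       // needs the sign copied upward
uint32_t srl8 (uint32_t a, unsigned s) { return zext8(a) >> s; }       // needs zeros above bit 7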
bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) { DEBUG(cerr << "Promote integer operand: "; N->dump(&DAG); cerr << "\n"); - SDOperand Res = SDOperand(); + SDValue Res = SDValue(); if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType()) == TargetLowering::Custom) - Res = TLI.LowerOperation(SDOperand(N, OpNo), DAG); + Res = TLI.LowerOperation(SDValue(N, OpNo), DAG); if (Res.Val == 0) { switch (N->getOpcode()) { @@ -542,13 +542,13 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) { assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && "Invalid operand expansion"); - ReplaceValueWith(SDOperand(N, 0), Res); + ReplaceValueWith(SDValue(N, 0), Res); return false; } /// PromoteSetCCOperands - Promote the operands of a comparison. This code is /// shared among BR_CC, SELECT_CC, and SETCC handlers. -void DAGTypeLegalizer::PromoteSetCCOperands(SDOperand &NewLHS,SDOperand &NewRHS, +void DAGTypeLegalizer::PromoteSetCCOperands(SDValue &NewLHS,SDValue &NewRHS, ISD::CondCode CCCode) { MVT VT = NewLHS.getValueType(); @@ -586,27 +586,27 @@ void DAGTypeLegalizer::PromoteSetCCOperands(SDOperand &NewLHS,SDOperand &NewRHS, } } -SDOperand DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op); } -SDOperand DAGTypeLegalizer::PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo) { assert(OpNo == 2 && "Don't know how to promote this operand!"); - SDOperand LHS = N->getOperand(2); - SDOperand RHS = N->getOperand(3); + SDValue LHS = N->getOperand(2); + SDValue RHS = N->getOperand(3); PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(1))->get()); // The chain (Op#0), CC (#1) and basic block destination (Op#4) are always // legal types. - return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), + return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), N->getOperand(1), LHS, RHS, N->getOperand(4)); } -SDOperand DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) { assert(OpNo == 1 && "only know how to promote condition"); - SDOperand Cond = GetPromotedInteger(N->getOperand(1)); // Promote condition. + SDValue Cond = GetPromotedInteger(N->getOperand(1)); // Promote condition. // The top bits of the promoted condition are not necessarily zero, ensure // that the value is properly zero extended. @@ -616,15 +616,15 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) { Cond = DAG.getZeroExtendInReg(Cond, MVT::i1); // The chain (Op#0) and basic block destination (Op#2) are always legal types. - return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), Cond, + return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), Cond, N->getOperand(2)); } -SDOperand DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) { // Since the result type is legal, the operands must promote to it. 
MVT OVT = N->getOperand(0).getValueType(); - SDOperand Lo = GetPromotedInteger(N->getOperand(0)); - SDOperand Hi = GetPromotedInteger(N->getOperand(1)); + SDValue Lo = GetPromotedInteger(N->getOperand(0)); + SDValue Hi = GetPromotedInteger(N->getOperand(1)); assert(Lo.getValueType() == N->getValueType(0) && "Operand over promoted?"); Lo = DAG.getZeroExtendInReg(Lo, OVT); @@ -634,7 +634,7 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_BUILD_PAIR(SDNode *N) { return DAG.getNode(ISD::OR, N->getValueType(0), Lo, Hi); } -SDOperand DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) { +SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) { // The vector type is legal but the element type is not. This implies // that the vector is a power-of-two in length and that the element // type does not have a strange size (eg: it is not i1). @@ -648,19 +648,19 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) { MVT NewVT = MVT::getIntegerVT(2 * OldVT.getSizeInBits()); assert(OldVT.isSimple() && NewVT.isSimple()); - std::vector<SDOperand> NewElts; + std::vector<SDValue> NewElts; NewElts.reserve(NumElts/2); for (unsigned i = 0; i < NumElts; i += 2) { // Combine two successive elements into one promoted element. - SDOperand Lo = N->getOperand(i); - SDOperand Hi = N->getOperand(i+1); + SDValue Lo = N->getOperand(i); + SDValue Hi = N->getOperand(i+1); if (TLI.isBigEndian()) std::swap(Lo, Hi); NewElts.push_back(JoinIntegers(Lo, Hi)); } - SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR, + SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, MVT::getVectorVT(NewVT, NewElts.size()), &NewElts[0], NewElts.size()); @@ -668,19 +668,19 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) { return DAG.getNode(ISD::BIT_CONVERT, VecVT, NewVec); } -SDOperand DAGTypeLegalizer::PromoteIntOp_FP_EXTEND(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntOp_FP_EXTEND(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::FP_EXTEND, N->getValueType(0), Op); } -SDOperand DAGTypeLegalizer::PromoteIntOp_FP_ROUND(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntOp_FP_ROUND(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::FP_ROUND, N->getValueType(0), Op, DAG.getIntPtrConstant(0)); } -SDOperand DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, - unsigned OpNo) { +SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, + unsigned OpNo) { if (OpNo == 1) { // Promote the inserted value. This is valid because the type does not // have to match the vector element type. @@ -689,7 +689,7 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, assert(N->getOperand(1).getValueType().getSizeInBits() >= N->getValueType(0).getVectorElementType().getSizeInBits() && "Type of inserted value narrower than vector element type!"); - return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), + return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), GetPromotedInteger(N->getOperand(1)), N->getOperand(2)); } @@ -697,14 +697,14 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, assert(OpNo == 2 && "Different operand and result vector types?"); // Promote the index. 
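PromoteIntOp_BUILD_PAIR reassembles the legal result from the two promoted halves the same way JoinIntegers does: zero-extend the low half in register, shift the high half up by the original width, and OR. A sketch for two i16 halves forming an i32 (fixed widths assumed for illustration):

#include <cstdint>

uint32_t join_i16_pair(uint32_t lo, uint32_t hi) {
  lo &= 0xFFFFu;        // zero-extend-in-reg of the low half
  hi <<= 16;            // move the high half into place
  return lo | hi;       // OR the halves together
}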
- SDOperand Idx = N->getOperand(2); + SDValue Idx = N->getOperand(2); Idx = DAG.getZeroExtendInReg(GetPromotedInteger(Idx), Idx.getValueType()); - return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), + return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), N->getOperand(1), Idx); } -SDOperand DAGTypeLegalizer::PromoteIntOp_INT_TO_FP(SDNode *N) { - SDOperand In = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntOp_INT_TO_FP(SDNode *N) { + SDValue In = GetPromotedInteger(N->getOperand(0)); MVT OpVT = N->getOperand(0).getValueType(); if (N->getOpcode() == ISD::UINT_TO_FP) In = DAG.getZeroExtendInReg(In, OpVT); @@ -712,23 +712,23 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_INT_TO_FP(SDNode *N) { In = DAG.getNode(ISD::SIGN_EXTEND_INREG, In.getValueType(), In, DAG.getValueType(OpVT)); - return DAG.UpdateNodeOperands(SDOperand(N, 0), In); + return DAG.UpdateNodeOperands(SDValue(N, 0), In); } -SDOperand DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) { - SDOperand NewOps[6]; +SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) { + SDValue NewOps[6]; NewOps[0] = N->getOperand(0); for (unsigned i = 1; i < array_lengthof(NewOps); ++i) { - SDOperand Flag = GetPromotedInteger(N->getOperand(i)); + SDValue Flag = GetPromotedInteger(N->getOperand(i)); NewOps[i] = DAG.getZeroExtendInReg(Flag, MVT::i1); } - return DAG.UpdateNodeOperands(SDOperand (N, 0), NewOps, + return DAG.UpdateNodeOperands(SDValue (N, 0), NewOps, array_lengthof(NewOps)); } -SDOperand DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) { assert(OpNo == 0 && "Only know how to promote condition"); - SDOperand Cond = GetPromotedInteger(N->getOperand(0)); // Promote condition. + SDValue Cond = GetPromotedInteger(N->getOperand(0)); // Promote condition. // The top bits of the promoted condition are not necessarily zero, ensure // that the value is properly zero extended. @@ -738,48 +738,48 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_SELECT(SDNode *N, unsigned OpNo) { Cond = DAG.getZeroExtendInReg(Cond, MVT::i1); // The chain (Op#0) and basic block destination (Op#2) are always legal types. - return DAG.UpdateNodeOperands(SDOperand(N, 0), Cond, N->getOperand(1), + return DAG.UpdateNodeOperands(SDValue(N, 0), Cond, N->getOperand(1), N->getOperand(2)); } -SDOperand DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo) { assert(OpNo == 0 && "Don't know how to promote this operand!"); - SDOperand LHS = N->getOperand(0); - SDOperand RHS = N->getOperand(1); + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(4))->get()); // The CC (#4) and the possible return values (#2 and #3) have legal types. 
- return DAG.UpdateNodeOperands(SDOperand(N, 0), LHS, RHS, N->getOperand(2), + return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2), N->getOperand(3), N->getOperand(4)); } -SDOperand DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::PromoteIntOp_SETCC(SDNode *N, unsigned OpNo) { assert(OpNo == 0 && "Don't know how to promote this operand!"); - SDOperand LHS = N->getOperand(0); - SDOperand RHS = N->getOperand(1); + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); PromoteSetCCOperands(LHS, RHS, cast<CondCodeSDNode>(N->getOperand(2))->get()); // The CC (#2) is always legal. - return DAG.UpdateNodeOperands(SDOperand(N, 0), LHS, RHS, N->getOperand(2)); + return DAG.UpdateNodeOperands(SDValue(N, 0), LHS, RHS, N->getOperand(2)); } -SDOperand DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntOp_SIGN_EXTEND(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); Op = DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op); return DAG.getNode(ISD::SIGN_EXTEND_INREG, Op.getValueType(), Op, DAG.getValueType(N->getOperand(0).getValueType())); } -SDOperand DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){ +SDValue DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){ assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!"); - SDOperand Ch = N->getChain(), Ptr = N->getBasePtr(); + SDValue Ch = N->getChain(), Ptr = N->getBasePtr(); int SVOffset = N->getSrcValueOffset(); unsigned Alignment = N->getAlignment(); bool isVolatile = N->isVolatile(); - SDOperand Val = GetPromotedInteger(N->getValue()); // Get promoted value. + SDValue Val = GetPromotedInteger(N->getValue()); // Get promoted value. assert(!N->isTruncatingStore() && "Cannot promote this store operand!"); @@ -789,13 +789,13 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo){ isVolatile, Alignment); } -SDOperand DAGTypeLegalizer::PromoteIntOp_TRUNCATE(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntOp_TRUNCATE(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); return DAG.getNode(ISD::TRUNCATE, N->getValueType(0), Op); } -SDOperand DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) { - SDOperand Op = GetPromotedInteger(N->getOperand(0)); +SDValue DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) { + SDValue Op = GetPromotedInteger(N->getOperand(0)); Op = DAG.getNode(ISD::ANY_EXTEND, N->getValueType(0), Op); return DAG.getZeroExtendInReg(Op, N->getOperand(0).getValueType()); } @@ -811,8 +811,8 @@ SDOperand DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) { /// know that (at least) one result needs expansion. void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) { DEBUG(cerr << "Expand integer result: "; N->dump(&DAG); cerr << "\n"); - SDOperand Lo, Hi; - Lo = Hi = SDOperand(); + SDValue Lo, Hi; + Lo = Hi = SDValue(); // See if the target wants to custom expand this node. if (TLI.getOperationAction(N->getOpcode(), N->getValueType(ResNo)) == @@ -886,15 +886,15 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) { // If Lo/Hi is null, the sub-method took care of registering results etc. 
if (Lo.Val) - SetExpandedInteger(SDOperand(N, ResNo), Lo, Hi); + SetExpandedInteger(SDValue(N, ResNo), Lo, Hi); } /// ExpandShiftByConstant - N is a shift by a value that needs to be expanded, /// and the shift amount is a constant 'Amt'. Expand the operation. void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // Expand the incoming operand to be shifted, so that we have its parts - SDOperand InL, InH; + SDValue InL, InH; GetExpandedInteger(N->getOperand(0), InL, InH); MVT NVT = InL.getValueType(); @@ -914,9 +914,9 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt, } else if (Amt == 1) { // Emit this X << 1 as X+X. SDVTList VTList = DAG.getVTList(NVT, MVT::Flag); - SDOperand LoOps[2] = { InL, InL }; + SDValue LoOps[2] = { InL, InL }; Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2); - SDOperand HiOps[3] = { InH, InH, Lo.getValue(1) }; + SDValue HiOps[3] = { InH, InH, Lo.getValue(1) }; Hi = DAG.getNode(ISD::ADDE, VTList, HiOps, 3); } else { Lo = DAG.getNode(ISD::SHL, NVT, InL, DAG.getConstant(Amt, ShTy)); @@ -978,8 +978,8 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt, /// can tell this, we know that it is >= 32 or < 32, without knowing the actual /// shift amount. bool DAGTypeLegalizer:: -ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi) { - SDOperand Amt = N->getOperand(1); +ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) { + SDValue Amt = N->getOperand(1); MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); MVT ShTy = Amt.getValueType(); unsigned ShBits = ShTy.getSizeInBits(); @@ -996,7 +996,7 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi) { return false; // Get the incoming operand to be shifted. - SDOperand InL, InH; + SDValue InL, InH; GetExpandedInteger(N->getOperand(0), InL, InH); // If we know that any of the high bits of the shift amount are one, then we @@ -1028,7 +1028,7 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi) { // can do this as a couple of simple shifts. if ((KnownZero & HighBitMask) == HighBitMask) { // Compute 32-amt. - SDOperand Amt2 = DAG.getNode(ISD::SUB, ShTy, + SDValue Amt2 = DAG.getNode(ISD::SUB, ShTy, DAG.getConstant(NVTBits, ShTy), Amt); unsigned Op1, Op2; @@ -1050,14 +1050,14 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi) { } void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // Expand the subcomponents. - SDOperand LHSL, LHSH, RHSL, RHSH; + SDValue LHSL, LHSH, RHSL, RHSH; GetExpandedInteger(N->getOperand(0), LHSL, LHSH); GetExpandedInteger(N->getOperand(1), RHSL, RHSH); SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag); - SDOperand LoOps[2] = { LHSL, RHSL }; - SDOperand HiOps[3] = { LHSH, RHSH }; + SDValue LoOps[2] = { LHSL, RHSL }; + SDValue HiOps[3] = { LHSH, RHSH }; if (N->getOpcode() == ISD::ADD) { Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2); @@ -1071,14 +1071,14 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_ADDSUBC(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // Expand the subcomponents. 
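ExpandIntRes_ADDSUB is the classic carry chain: ADDC/SUBC produce the low half plus a carry or borrow flag, and ADDE/SUBE fold that flag into the high half. A sketch of 64-bit add and subtract built from 32-bit halves, with plain C++ in place of the glued DAG nodes:

#include <cstdint>

void add64(uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh,
           uint32_t &lo, uint32_t &hi) {
  lo = al + bl;                // ADDC: low half
  uint32_t carry = lo < al;    // carry out of the low addition
  hi = ah + bh + carry;        // ADDE: high half plus carry
}

void sub64(uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh,
           uint32_t &lo, uint32_t &hi) {
  lo = al - bl;                // SUBC: low half
  uint32_t borrow = al < bl;   // borrow out of the low subtraction
  hi = ah - bh - borrow;       // SUBE: high half minus borrow
}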
- SDOperand LHSL, LHSH, RHSL, RHSH; + SDValue LHSL, LHSH, RHSL, RHSH; GetExpandedInteger(N->getOperand(0), LHSL, LHSH); GetExpandedInteger(N->getOperand(1), RHSL, RHSH); SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag); - SDOperand LoOps[2] = { LHSL, RHSL }; - SDOperand HiOps[3] = { LHSH, RHSH }; + SDValue LoOps[2] = { LHSL, RHSL }; + SDValue HiOps[3] = { LHSH, RHSH }; if (N->getOpcode() == ISD::ADDC) { Lo = DAG.getNode(ISD::ADDC, VTList, LoOps, 2); @@ -1092,18 +1092,18 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBC(SDNode *N, // Legalized the flag result - switch anything that used the old flag to // use the new one. - ReplaceValueWith(SDOperand(N, 1), Hi.getValue(1)); + ReplaceValueWith(SDValue(N, 1), Hi.getValue(1)); } void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // Expand the subcomponents. - SDOperand LHSL, LHSH, RHSL, RHSH; + SDValue LHSL, LHSH, RHSL, RHSH; GetExpandedInteger(N->getOperand(0), LHSL, LHSH); GetExpandedInteger(N->getOperand(1), RHSL, RHSH); SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag); - SDOperand LoOps[3] = { LHSL, RHSL, N->getOperand(2) }; - SDOperand HiOps[3] = { LHSH, RHSH }; + SDValue LoOps[3] = { LHSL, RHSL, N->getOperand(2) }; + SDValue HiOps[3] = { LHSH, RHSH }; Lo = DAG.getNode(N->getOpcode(), VTList, LoOps, 3); HiOps[2] = Lo.getValue(1); @@ -1111,13 +1111,13 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N, // Legalized the flag result - switch anything that used the old flag to // use the new one. - ReplaceValueWith(SDOperand(N, 1), Hi.getValue(1)); + ReplaceValueWith(SDValue(N, 1), Hi.getValue(1)); } void DAGTypeLegalizer::ExpandIntRes_ANY_EXTEND(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); if (Op.getValueType().bitsLE(NVT)) { // The low part is any extension of the input (which degenerates to a copy). Lo = DAG.getNode(ISD::ANY_EXTEND, NVT, Op); @@ -1127,7 +1127,7 @@ void DAGTypeLegalizer::ExpandIntRes_ANY_EXTEND(SDNode *N, // promotes to the result type, so will end up being expanded too. assert(getTypeAction(Op.getValueType()) == PromoteInteger && "Only know how to promote this result!"); - SDOperand Res = GetPromotedInteger(Op); + SDValue Res = GetPromotedInteger(Op); assert(Res.getValueType() == N->getValueType(0) && "Operand over promoted?"); // Split the promoted operand. This will simplify when it is expanded. @@ -1136,7 +1136,7 @@ void DAGTypeLegalizer::ExpandIntRes_ANY_EXTEND(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_AssertSext(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { GetExpandedInteger(N->getOperand(0), Lo, Hi); MVT NVT = Lo.getValueType(); MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); @@ -1155,7 +1155,7 @@ void DAGTypeLegalizer::ExpandIntRes_AssertSext(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_AssertZext(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { GetExpandedInteger(N->getOperand(0), Lo, Hi); MVT NVT = Lo.getValueType(); MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); @@ -1173,14 +1173,14 @@ void DAGTypeLegalizer::ExpandIntRes_AssertZext(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_BSWAP(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { GetExpandedInteger(N->getOperand(0), Hi, Lo); // Note swapped operands. 
Lo = DAG.getNode(ISD::BSWAP, Lo.getValueType(), Lo); Hi = DAG.getNode(ISD::BSWAP, Hi.getValueType(), Hi); } void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); unsigned NBitWidth = NVT.getSizeInBits(); const APInt &Cst = cast<ConstantSDNode>(N)->getAPIntValue(); @@ -1189,16 +1189,16 @@ void DAGTypeLegalizer::ExpandIntRes_Constant(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // ctlz (HiLo) -> Hi != 0 ? ctlz(Hi) : (ctlz(Lo)+32) GetExpandedInteger(N->getOperand(0), Lo, Hi); MVT NVT = Lo.getValueType(); - SDOperand HiNotZero = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi, + SDValue HiNotZero = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi, DAG.getConstant(0, NVT), ISD::SETNE); - SDOperand LoLZ = DAG.getNode(ISD::CTLZ, NVT, Lo); - SDOperand HiLZ = DAG.getNode(ISD::CTLZ, NVT, Hi); + SDValue LoLZ = DAG.getNode(ISD::CTLZ, NVT, Lo); + SDValue HiLZ = DAG.getNode(ISD::CTLZ, NVT, Hi); Lo = DAG.getNode(ISD::SELECT, NVT, HiNotZero, HiLZ, DAG.getNode(ISD::ADD, NVT, LoLZ, @@ -1207,7 +1207,7 @@ void DAGTypeLegalizer::ExpandIntRes_CTLZ(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // ctpop(HiLo) -> ctpop(Hi)+ctpop(Lo) GetExpandedInteger(N->getOperand(0), Lo, Hi); MVT NVT = Lo.getValueType(); @@ -1217,16 +1217,16 @@ void DAGTypeLegalizer::ExpandIntRes_CTPOP(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // cttz (HiLo) -> Lo != 0 ? cttz(Lo) : (cttz(Hi)+32) GetExpandedInteger(N->getOperand(0), Lo, Hi); MVT NVT = Lo.getValueType(); - SDOperand LoNotZero = DAG.getSetCC(TLI.getSetCCResultType(Lo), Lo, + SDValue LoNotZero = DAG.getSetCC(TLI.getSetCCResultType(Lo), Lo, DAG.getConstant(0, NVT), ISD::SETNE); - SDOperand LoLZ = DAG.getNode(ISD::CTTZ, NVT, Lo); - SDOperand HiLZ = DAG.getNode(ISD::CTTZ, NVT, Hi); + SDValue LoLZ = DAG.getNode(ISD::CTTZ, NVT, Lo); + SDValue HiLZ = DAG.getNode(ISD::CTTZ, NVT, Hi); Lo = DAG.getNode(ISD::SELECT, NVT, LoNotZero, LoLZ, DAG.getNode(ISD::ADD, NVT, HiLZ, @@ -1234,26 +1234,26 @@ void DAGTypeLegalizer::ExpandIntRes_CTTZ(SDNode *N, Hi = DAG.getConstant(0, NVT); } -void DAGTypeLegalizer::ExpandIntRes_FP_TO_SINT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandIntRes_FP_TO_SINT(SDNode *N, SDValue &Lo, + SDValue &Hi) { MVT VT = N->getValueType(0); - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); RTLIB::Libcall LC = RTLIB::getFPTOSINT(Op.getValueType(), VT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-sint conversion!"); SplitInteger(MakeLibCall(LC, VT, &Op, 1, true/*sign irrelevant*/), Lo, Hi); } -void DAGTypeLegalizer::ExpandIntRes_FP_TO_UINT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandIntRes_FP_TO_UINT(SDNode *N, SDValue &Lo, + SDValue &Hi) { MVT VT = N->getValueType(0); - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); RTLIB::Libcall LC = RTLIB::getFPTOUINT(Op.getValueType(), VT); assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected fp-to-uint conversion!"); SplitInteger(MakeLibCall(LC, VT, &Op, 1, false/*sign irrelevant*/), Lo, Hi); } void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { if (ISD::isNormalLoad(N)) { ExpandRes_NormalLoad(N, Lo, Hi); 
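The BSWAP, CTLZ, CTPOP and CTTZ expansions all follow the identities spelled out in the comments above, applied to a Hi:Lo pair of halves. A sketch for i64 split into two i32 halves, again using compiler builtins:

#include <cstdint>

// bswap(Hi:Lo) = bswap(Lo):bswap(Hi), i.e. the halves swap places.
void bswap64(uint32_t lo, uint32_t hi, uint32_t &rlo, uint32_t &rhi) {
  rlo = __builtin_bswap32(hi);
  rhi = __builtin_bswap32(lo);
}

// ctlz(Hi:Lo) = Hi != 0 ? ctlz(Hi) : ctlz(Lo) + 32
unsigned ctlz64(uint32_t lo, uint32_t hi) {
  return hi ? __builtin_clz(hi) : __builtin_clz(lo) + 32;   // undefined if both are 0
}

// cttz(Hi:Lo) = Lo != 0 ? cttz(Lo) : cttz(Hi) + 32
unsigned cttz64(uint32_t lo, uint32_t hi) {
  return lo ? __builtin_ctz(lo) : __builtin_ctz(hi) + 32;   // undefined if both are 0
}

// ctpop(Hi:Lo) = ctpop(Hi) + ctpop(Lo)
unsigned ctpop64(uint32_t lo, uint32_t hi) {
  return __builtin_popcount(lo) + __builtin_popcount(hi);
}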
return; @@ -1263,8 +1263,8 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N, MVT VT = N->getValueType(0); MVT NVT = TLI.getTypeToTransformTo(VT); - SDOperand Ch = N->getChain(); - SDOperand Ptr = N->getBasePtr(); + SDValue Ch = N->getChain(); + SDValue Ptr = N->getBasePtr(); ISD::LoadExtType ExtType = N->getExtensionType(); int SVOffset = N->getSrcValueOffset(); unsigned Alignment = N->getAlignment(); @@ -1358,12 +1358,12 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N, // Legalized the chain result - switch anything that used the old chain to // use the new one. - ReplaceValueWith(SDOperand(N, 1), Ch); + ReplaceValueWith(SDValue(N, 1), Ch); } void DAGTypeLegalizer::ExpandIntRes_Logical(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { - SDOperand LL, LH, RL, RH; + SDValue &Lo, SDValue &Hi) { + SDValue LL, LH, RL, RH; GetExpandedInteger(N->getOperand(0), LL, LH); GetExpandedInteger(N->getOperand(1), RL, RH); Lo = DAG.getNode(N->getOpcode(), LL.getValueType(), LL, RL); @@ -1371,7 +1371,7 @@ void DAGTypeLegalizer::ExpandIntRes_Logical(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT VT = N->getValueType(0); MVT NVT = TLI.getTypeToTransformTo(VT); @@ -1380,7 +1380,7 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N, bool HasSMUL_LOHI = TLI.isOperationLegal(ISD::SMUL_LOHI, NVT); bool HasUMUL_LOHI = TLI.isOperationLegal(ISD::UMUL_LOHI, NVT); if (HasMULHU || HasMULHS || HasUMUL_LOHI || HasSMUL_LOHI) { - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; GetExpandedInteger(N->getOperand(0), LL, LH); GetExpandedInteger(N->getOperand(1), RL, RH); unsigned OuterBitSize = VT.getSizeInBits(); @@ -1395,7 +1395,7 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N, if (HasUMUL_LOHI) { // We can emit a umul_lohi. Lo = DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL); - Hi = SDOperand(Lo.Val, 1); + Hi = SDValue(Lo.Val, 1); return; } if (HasMULHU) { @@ -1410,7 +1410,7 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N, if (HasSMUL_LOHI) { // We can emit a smul_lohi. Lo = DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL); - Hi = SDOperand(Lo.Val, 1); + Hi = SDValue(Lo.Val, 1); return; } if (HasMULHS) { @@ -1422,7 +1422,7 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N, } if (HasUMUL_LOHI) { // Lo,Hi = umul LHS, RHS. 
- SDOperand UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, + SDValue UMulLOHI = DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(NVT, NVT), LL, RL); Lo = UMulLOHI; Hi = UMulLOHI.getValue(1); @@ -1453,12 +1453,12 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N, LC = RTLIB::MUL_I128; assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported MUL!"); - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; SplitInteger(MakeLibCall(LC, VT, Ops, 2, true/*sign irrelevant*/), Lo, Hi); } void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT VT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; @@ -1470,12 +1470,12 @@ void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N, LC = RTLIB::SDIV_I128; assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!"); - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; SplitInteger(MakeLibCall(LC, VT, Ops, 2, true), Lo, Hi); } void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT VT = N->getValueType(0); // If we can emit an efficient shift operation, do so now. Check to see if @@ -1506,10 +1506,10 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N, if ((Action == TargetLowering::Legal && TLI.isTypeLegal(NVT)) || Action == TargetLowering::Custom) { // Expand the subcomponents. - SDOperand LHSL, LHSH; + SDValue LHSL, LHSH; GetExpandedInteger(N->getOperand(0), LHSL, LHSH); - SDOperand Ops[] = { LHSL, LHSH, N->getOperand(1) }; + SDValue Ops[] = { LHSL, LHSH, N->getOperand(1) }; MVT VT = LHSL.getValueType(); Lo = DAG.getNode(PartsOpc, DAG.getNodeValueTypes(VT, VT), 2, Ops, 3); Hi = Lo.getValue(1); @@ -1547,14 +1547,14 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N, } assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported shift!"); - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; SplitInteger(MakeLibCall(LC, VT, Ops, 2, isSigned), Lo, Hi); } void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); if (Op.getValueType().bitsLE(NVT)) { // The low part is sign extension of the input (which degenerates to a copy). Lo = DAG.getNode(ISD::SIGN_EXTEND, NVT, N->getOperand(0)); @@ -1567,7 +1567,7 @@ void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N, // promotes to the result type, so will end up being expanded too. assert(getTypeAction(Op.getValueType()) == PromoteInteger && "Only know how to promote this result!"); - SDOperand Res = GetPromotedInteger(Op); + SDValue Res = GetPromotedInteger(Op); assert(Res.getValueType() == N->getValueType(0) && "Operand over promoted?"); // Split the promoted operand. This will simplify when it is expanded. 
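The MUL expansion computes the low product with one widening multiply (UMUL_LOHI, or MUL plus MULHU) and then adds the two cross products into the high half; the LH*RH term only affects bits above the result width, so it is dropped. A sketch of a 64 x 64 -> 64 multiply from 32-bit halves, with a 64-bit intermediate standing in for UMUL_LOHI:

#include <cstdint>

void mul64(uint32_t ll, uint32_t lh, uint32_t rl, uint32_t rh,
           uint32_t &lo, uint32_t &hi) {
  uint64_t wide = (uint64_t)ll * rl;   // UMUL_LOHI of the low halves
  lo = (uint32_t)wide;                 // low half of the product
  hi = (uint32_t)(wide >> 32)          // MULHU contribution
     + ll * rh                         // cross product (low 32 bits only)
     + lh * rl;                        // cross product (low 32 bits only)
}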
@@ -1580,7 +1580,7 @@ void DAGTypeLegalizer::ExpandIntRes_SIGN_EXTEND(SDNode *N, } void DAGTypeLegalizer:: -ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDOperand &Lo, SDOperand &Hi) { +ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDValue &Lo, SDValue &Hi) { GetExpandedInteger(N->getOperand(0), Lo, Hi); MVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); @@ -1605,7 +1605,7 @@ ExpandIntRes_SIGN_EXTEND_INREG(SDNode *N, SDOperand &Lo, SDOperand &Hi) { } void DAGTypeLegalizer::ExpandIntRes_SREM(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT VT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; @@ -1617,12 +1617,12 @@ void DAGTypeLegalizer::ExpandIntRes_SREM(SDNode *N, LC = RTLIB::SREM_I128; assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!"); - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; SplitInteger(MakeLibCall(LC, VT, Ops, 2, true), Lo, Hi); } void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); Lo = DAG.getNode(ISD::TRUNCATE, NVT, N->getOperand(0)); Hi = DAG.getNode(ISD::SRL, N->getOperand(0).getValueType(), N->getOperand(0), @@ -1632,7 +1632,7 @@ void DAGTypeLegalizer::ExpandIntRes_TRUNCATE(SDNode *N, } void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT VT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; @@ -1644,12 +1644,12 @@ void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N, LC = RTLIB::UDIV_I128; assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported UDIV!"); - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; SplitInteger(MakeLibCall(LC, VT, Ops, 2, false), Lo, Hi); } void DAGTypeLegalizer::ExpandIntRes_UREM(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT VT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; @@ -1661,14 +1661,14 @@ void DAGTypeLegalizer::ExpandIntRes_UREM(SDNode *N, LC = RTLIB::UREM_I128; assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported UREM!"); - SDOperand Ops[2] = { N->getOperand(0), N->getOperand(1) }; + SDValue Ops[2] = { N->getOperand(0), N->getOperand(1) }; SplitInteger(MakeLibCall(LC, VT, Ops, 2, false), Lo, Hi); } void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); if (Op.getValueType().bitsLE(NVT)) { // The low part is zero extension of the input (which degenerates to a copy). Lo = DAG.getNode(ISD::ZERO_EXTEND, NVT, N->getOperand(0)); @@ -1678,7 +1678,7 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N, // promotes to the result type, so will end up being expanded too. assert(getTypeAction(Op.getValueType()) == PromoteInteger && "Only know how to promote this result!"); - SDOperand Res = GetPromotedInteger(Op); + SDValue Res = GetPromotedInteger(Op); assert(Res.getValueType() == N->getValueType(0) && "Operand over promoted?"); // Split the promoted operand. This will simplify when it is expanded. @@ -1700,11 +1700,11 @@ void DAGTypeLegalizer::ExpandIntRes_ZERO_EXTEND(SDNode *N, /// node may need promotion or expansion as well as the specified one. 
bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) { DEBUG(cerr << "Expand integer operand: "; N->dump(&DAG); cerr << "\n"); - SDOperand Res = SDOperand(); + SDValue Res = SDValue(); if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType()) == TargetLowering::Custom) - Res = TLI.LowerOperation(SDOperand(N, OpNo), DAG); + Res = TLI.LowerOperation(SDValue(N, OpNo), DAG); if (Res.Val == 0) { switch (N->getOpcode()) { @@ -1746,16 +1746,16 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) { assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && "Invalid operand expansion"); - ReplaceValueWith(SDOperand(N, 0), Res); + ReplaceValueWith(SDValue(N, 0), Res); return false; } /// IntegerExpandSetCCOperands - Expand the operands of a comparison. This code /// is shared among BR_CC, SELECT_CC, and SETCC handlers. -void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDOperand &NewLHS, - SDOperand &NewRHS, +void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDValue &NewLHS, + SDValue &NewRHS, ISD::CondCode &CCCode) { - SDOperand LHSLo, LHSHi, RHSLo, RHSHi; + SDValue LHSLo, LHSHi, RHSLo, RHSHi; GetExpandedInteger(NewLHS, LHSLo, LHSHi); GetExpandedInteger(NewRHS, RHSLo, RHSHi); @@ -1811,7 +1811,7 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDOperand &NewLHS, // NOTE: on targets without efficient SELECT of bools, we can always use // this identity: (B1 ? B2 : B3) --> (B1 & B2)|(!B1&B3) TargetLowering::DAGCombinerInfo DagCombineInfo(DAG, false, true, NULL); - SDOperand Tmp1, Tmp2; + SDValue Tmp1, Tmp2; Tmp1 = TLI.SimplifySetCC(TLI.getSetCCResultType(LHSLo), LHSLo, RHSLo, LowCC, false, DagCombineInfo); if (!Tmp1.Val) @@ -1835,7 +1835,7 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDOperand &NewLHS, // For LE / GE, if high part is known false, ignore the low part. // For LT / GT, if high part is known true, ignore the low part. NewLHS = Tmp2; - NewRHS = SDOperand(); + NewRHS = SDValue(); return; } @@ -1846,11 +1846,11 @@ void DAGTypeLegalizer::IntegerExpandSetCCOperands(SDOperand &NewLHS, ISD::SETEQ); NewLHS = DAG.getNode(ISD::SELECT, Tmp1.getValueType(), NewLHS, Tmp1, Tmp2); - NewRHS = SDOperand(); + NewRHS = SDValue(); } -SDOperand DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) { - SDOperand NewLHS = N->getOperand(2), NewRHS = N->getOperand(3); +SDValue DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) { + SDValue NewLHS = N->getOperand(2), NewRHS = N->getOperand(3); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(1))->get(); IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode); @@ -1862,13 +1862,13 @@ SDOperand DAGTypeLegalizer::ExpandIntOp_BR_CC(SDNode *N) { } // Update N to have the operands specified. - return DAG.UpdateNodeOperands(SDOperand(N, 0), N->getOperand(0), + return DAG.UpdateNodeOperands(SDValue(N, 0), N->getOperand(0), DAG.getCondCode(CCCode), NewLHS, NewRHS, N->getOperand(4)); } -SDOperand DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) { - SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); +SDValue DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) { + SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(4))->get(); IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode); @@ -1880,13 +1880,13 @@ SDOperand DAGTypeLegalizer::ExpandIntOp_SELECT_CC(SDNode *N) { } // Update N to have the operands specified. 
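IntegerExpandSetCCOperands lowers a wide comparison to comparisons of the halves: equality tests both halves, while ordered comparisons are decided by the high halves and fall back to an unsigned comparison of the low halves on a tie, since the low halves carry no sign. A sketch for i64 comparisons over 32-bit halves:

#include <cstdint>

bool slt64(uint32_t ll, int32_t lh, uint32_t rl, int32_t rh) {
  if (lh != rh)
    return lh < rh;    // high halves decide, with the original signedness
  return ll < rl;      // tie: compare low halves, always unsigned
}

bool eq64(uint32_t ll, uint32_t lh, uint32_t rl, uint32_t rh) {
  return ll == rl && lh == rh;   // SETEQ: both halves must match
}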
- return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS, + return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS, N->getOperand(2), N->getOperand(3), DAG.getCondCode(CCCode)); } -SDOperand DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) { - SDOperand NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); +SDValue DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) { + SDValue NewLHS = N->getOperand(0), NewRHS = N->getOperand(1); ISD::CondCode CCCode = cast<CondCodeSDNode>(N->getOperand(2))->get(); IntegerExpandSetCCOperands(NewLHS, NewRHS, CCCode); @@ -1898,12 +1898,12 @@ SDOperand DAGTypeLegalizer::ExpandIntOp_SETCC(SDNode *N) { } // Otherwise, update N to have the operands specified. - return DAG.UpdateNodeOperands(SDOperand(N, 0), NewLHS, NewRHS, + return DAG.UpdateNodeOperands(SDValue(N, 0), NewLHS, NewRHS, DAG.getCondCode(CCCode)); } -SDOperand DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) { - SDOperand Op = N->getOperand(0); +SDValue DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) { + SDValue Op = N->getOperand(0); MVT DstVT = N->getValueType(0); RTLIB::Libcall LC = RTLIB::getSINTTOFP(Op.getValueType(), DstVT); assert(LC != RTLIB::UNKNOWN_LIBCALL && @@ -1911,7 +1911,7 @@ SDOperand DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) { return MakeLibCall(LC, DstVT, &Op, 1, true); } -SDOperand DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) { if (ISD::isNormalStore(N)) return ExpandOp_NormalStore(N, OpNo); @@ -1920,12 +1920,12 @@ SDOperand DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) { MVT VT = N->getOperand(1).getValueType(); MVT NVT = TLI.getTypeToTransformTo(VT); - SDOperand Ch = N->getChain(); - SDOperand Ptr = N->getBasePtr(); + SDValue Ch = N->getChain(); + SDValue Ptr = N->getBasePtr(); int SVOffset = N->getSrcValueOffset(); unsigned Alignment = N->getAlignment(); bool isVolatile = N->isVolatile(); - SDOperand Lo, Hi; + SDValue Lo, Hi; assert(NVT.isByteSized() && "Expanded type not byte sized!"); @@ -1990,21 +1990,21 @@ SDOperand DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) { } } -SDOperand DAGTypeLegalizer::ExpandIntOp_TRUNCATE(SDNode *N) { - SDOperand InL, InH; +SDValue DAGTypeLegalizer::ExpandIntOp_TRUNCATE(SDNode *N) { + SDValue InL, InH; GetExpandedInteger(N->getOperand(0), InL, InH); // Just truncate the low part of the source. return DAG.getNode(ISD::TRUNCATE, N->getValueType(0), InL); } -SDOperand DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) { - SDOperand Op = N->getOperand(0); +SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) { + SDValue Op = N->getOperand(0); MVT SrcVT = Op.getValueType(); MVT DstVT = N->getValueType(0); if (TLI.getOperationAction(ISD::SINT_TO_FP, SrcVT) == TargetLowering::Custom){ // Do a signed conversion then adjust the result. - SDOperand SignedConv = DAG.getNode(ISD::SINT_TO_FP, DstVT, Op); + SDValue SignedConv = DAG.getNode(ISD::SINT_TO_FP, DstVT, Op); SignedConv = TLI.LowerOperation(SignedConv, DAG); // The result of the signed conversion needs adjusting if the 'sign bit' of @@ -2026,27 +2026,27 @@ SDOperand DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) { assert(false && "Unsupported UINT_TO_FP!"); // Check whether the sign bit is set. 
- SDOperand Lo, Hi; + SDValue Lo, Hi; GetExpandedInteger(Op, Lo, Hi); - SDOperand SignSet = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi, + SDValue SignSet = DAG.getSetCC(TLI.getSetCCResultType(Hi), Hi, DAG.getConstant(0, Hi.getValueType()), ISD::SETLT); // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits. - SDOperand FudgePtr = DAG.getConstantPool(ConstantInt::get(FF.zext(64)), + SDValue FudgePtr = DAG.getConstantPool(ConstantInt::get(FF.zext(64)), TLI.getPointerTy()); // Get a pointer to FF if the sign bit was set, or to 0 otherwise. - SDOperand Zero = DAG.getIntPtrConstant(0); - SDOperand Four = DAG.getIntPtrConstant(4); + SDValue Zero = DAG.getIntPtrConstant(0); + SDValue Four = DAG.getIntPtrConstant(4); if (TLI.isBigEndian()) std::swap(Zero, Four); - SDOperand Offset = DAG.getNode(ISD::SELECT, Zero.getValueType(), SignSet, + SDValue Offset = DAG.getNode(ISD::SELECT, Zero.getValueType(), SignSet, Zero, Four); FudgePtr = DAG.getNode(ISD::ADD, TLI.getPointerTy(), FudgePtr, Offset); // Load the value out, extending it from f32 to the destination float type. // FIXME: Avoid the extend by constructing the right constant pool? - SDOperand Fudge = DAG.getExtLoad(ISD::EXTLOAD, DstVT, DAG.getEntryNode(), + SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, DstVT, DAG.getEntryNode(), FudgePtr, NULL, 0, MVT::f32); return DAG.getNode(ISD::FADD, DstVT, SignedConv, Fudge); } diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index 1d65e97003..9eaf326896 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -29,7 +29,7 @@ void DAGTypeLegalizer::run() { // The root of the dag may dangle to deleted nodes until the type legalizer is // done. Set it to null to avoid confusion. - DAG.setRoot(SDOperand()); + DAG.setRoot(SDValue()); // Walk all nodes in the graph, assigning them a NodeID of 'ReadyToProcess' // (and remembering them) if they are leaves and assigning 'NewNode' if @@ -239,11 +239,11 @@ void DAGTypeLegalizer::AnalyzeNewNode(SDNode *&N) { // replaced them, which can result in our node changing. Since remapping // is rare, the code tries to minimize overhead in the non-remapping case. - SmallVector<SDOperand, 8> NewOps; + SmallVector<SDValue, 8> NewOps; unsigned NumProcessed = 0; for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { - SDOperand OrigOp = N->getOperand(i); - SDOperand Op = OrigOp; + SDValue OrigOp = N->getOperand(i); + SDValue Op = OrigOp; if (Op.Val->getNodeId() == Processed) RemapNode(Op); @@ -266,7 +266,7 @@ void DAGTypeLegalizer::AnalyzeNewNode(SDNode *&N) { // Some operands changed - update the node. if (!NewOps.empty()) - N = DAG.UpdateNodeOperands(SDOperand(N, 0), &NewOps[0], NewOps.size()).Val; + N = DAG.UpdateNodeOperands(SDValue(N, 0), &NewOps[0], NewOps.size()).Val; N->setNodeId(N->getNumOperands()-NumProcessed); if (N->getNodeId() == ReadyToProcess) @@ -308,7 +308,7 @@ namespace { /// ReplaceValueWith - The specified value was legalized to the specified other /// value. If they are different, update the DAG and NodeIDs replacing any uses /// of From to use To instead. -void DAGTypeLegalizer::ReplaceValueWith(SDOperand From, SDOperand To) { +void DAGTypeLegalizer::ReplaceValueWith(SDValue From, SDValue To) { if (From == To) return; // If expansion produced new nodes, make sure they are properly marked. 
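ExpandIntOp_UINT_TO_FP reuses the signed conversion and then repairs the result: if the 'sign bit' of the unsigned source was set, the signed conversion came out 2^N too small, and the constant-pool pair (0, FF) selected above supplies exactly that correction. The same trick on plain types, assuming two's-complement casts, for u64 -> double:

#include <cstdint>

double u64_to_double(uint64_t x) {
  double d = (double)(int64_t)x;      // signed conversion of the same bit pattern
  if ((int64_t)x < 0)                 // the unsigned value's top bit was set
    d += 18446744073709551616.0;      // add 2^64 to undo the signed wrap
  return d;                           // rounding aside, this equals (double)x
}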
@@ -347,14 +347,14 @@ void DAGTypeLegalizer::ReplaceNodeWith(SDNode *From, SDNode *To) { for (unsigned i = 0, e = From->getNumValues(); i != e; ++i) { assert(From->getValueType(i) == To->getValueType(i) && "Node results don't match"); - ReplacedNodes[SDOperand(From, i)] = SDOperand(To, i); + ReplacedNodes[SDValue(From, i)] = SDValue(To, i); } } /// RemapNode - If the specified value was already legalized to another value, /// replace it by that value. -void DAGTypeLegalizer::RemapNode(SDOperand &N) { - DenseMap<SDOperand, SDOperand>::iterator I = ReplacedNodes.find(N); +void DAGTypeLegalizer::RemapNode(SDValue &N) { + DenseMap<SDValue, SDValue>::iterator I = ReplacedNodes.find(N); if (I != ReplacedNodes.end()) { // Use path compression to speed up future lookups if values get multiply // replaced with other values. @@ -383,7 +383,7 @@ void DAGTypeLegalizer::ExpungeNode(SDNode *N) { // If N is not remapped by ReplacedNodes then there is nothing to do. unsigned i, e; for (i = 0, e = N->getNumValues(); i != e; ++i) - if (ReplacedNodes.find(SDOperand(N, i)) != ReplacedNodes.end()) + if (ReplacedNodes.find(SDValue(N, i)) != ReplacedNodes.end()) break; if (i == e) @@ -391,80 +391,80 @@ void DAGTypeLegalizer::ExpungeNode(SDNode *N) { // Remove N from all maps - this is expensive but rare. - for (DenseMap<SDOperand, SDOperand>::iterator I = PromotedIntegers.begin(), + for (DenseMap<SDValue, SDValue>::iterator I = PromotedIntegers.begin(), E = PromotedIntegers.end(); I != E; ++I) { assert(I->first.Val != N); RemapNode(I->second); } - for (DenseMap<SDOperand, SDOperand>::iterator I = SoftenedFloats.begin(), + for (DenseMap<SDValue, SDValue>::iterator I = SoftenedFloats.begin(), E = SoftenedFloats.end(); I != E; ++I) { assert(I->first.Val != N); RemapNode(I->second); } - for (DenseMap<SDOperand, SDOperand>::iterator I = ScalarizedVectors.begin(), + for (DenseMap<SDValue, SDValue>::iterator I = ScalarizedVectors.begin(), E = ScalarizedVectors.end(); I != E; ++I) { assert(I->first.Val != N); RemapNode(I->second); } - for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator + for (DenseMap<SDValue, std::pair<SDValue, SDValue> >::iterator I = ExpandedIntegers.begin(), E = ExpandedIntegers.end(); I != E; ++I){ assert(I->first.Val != N); RemapNode(I->second.first); RemapNode(I->second.second); } - for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator + for (DenseMap<SDValue, std::pair<SDValue, SDValue> >::iterator I = ExpandedFloats.begin(), E = ExpandedFloats.end(); I != E; ++I) { assert(I->first.Val != N); RemapNode(I->second.first); RemapNode(I->second.second); } - for (DenseMap<SDOperand, std::pair<SDOperand, SDOperand> >::iterator + for (DenseMap<SDValue, std::pair<SDValue, SDValue> >::iterator I = SplitVectors.begin(), E = SplitVectors.end(); I != E; ++I) { assert(I->first.Val != N); RemapNode(I->second.first); RemapNode(I->second.second); } - for (DenseMap<SDOperand, SDOperand>::iterator I = ReplacedNodes.begin(), + for (DenseMap<SDValue, SDValue>::iterator I = ReplacedNodes.begin(), E = ReplacedNodes.end(); I != E; ++I) RemapNode(I->second); for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) - ReplacedNodes.erase(SDOperand(N, i)); + ReplacedNodes.erase(SDValue(N, i)); } -void DAGTypeLegalizer::SetPromotedInteger(SDOperand Op, SDOperand Result) { +void DAGTypeLegalizer::SetPromotedInteger(SDValue Op, SDValue Result) { AnalyzeNewNode(Result.Val); - SDOperand &OpEntry = PromotedIntegers[Op]; + SDValue &OpEntry = PromotedIntegers[Op]; assert(OpEntry.Val == 0 && "Node is 
already promoted!"); OpEntry = Result; } -void DAGTypeLegalizer::SetSoftenedFloat(SDOperand Op, SDOperand Result) { +void DAGTypeLegalizer::SetSoftenedFloat(SDValue Op, SDValue Result) { AnalyzeNewNode(Result.Val); - SDOperand &OpEntry = SoftenedFloats[Op]; + SDValue &OpEntry = SoftenedFloats[Op]; assert(OpEntry.Val == 0 && "Node is already converted to integer!"); OpEntry = Result; } -void DAGTypeLegalizer::SetScalarizedVector(SDOperand Op, SDOperand Result) { +void DAGTypeLegalizer::SetScalarizedVector(SDValue Op, SDValue Result) { AnalyzeNewNode(Result.Val); - SDOperand &OpEntry = ScalarizedVectors[Op]; + SDValue &OpEntry = ScalarizedVectors[Op]; assert(OpEntry.Val == 0 && "Node is already scalarized!"); OpEntry = Result; } -void DAGTypeLegalizer::GetExpandedInteger(SDOperand Op, SDOperand &Lo, - SDOperand &Hi) { - std::pair<SDOperand, SDOperand> &Entry = ExpandedIntegers[Op]; +void DAGTypeLegalizer::GetExpandedInteger(SDValue Op, SDValue &Lo, + SDValue &Hi) { + std::pair<SDValue, SDValue> &Entry = ExpandedIntegers[Op]; RemapNode(Entry.first); RemapNode(Entry.second); assert(Entry.first.Val && "Operand isn't expanded"); @@ -472,22 +472,22 @@ void DAGTypeLegalizer::GetExpandedInteger(SDOperand Op, SDOperand &Lo, Hi = Entry.second; } -void DAGTypeLegalizer::SetExpandedInteger(SDOperand Op, SDOperand Lo, - SDOperand Hi) { +void DAGTypeLegalizer::SetExpandedInteger(SDValue Op, SDValue Lo, + SDValue Hi) { // Lo/Hi may have been newly allocated, if so, add nodeid's as relevant. AnalyzeNewNode(Lo.Val); AnalyzeNewNode(Hi.Val); // Remember that this is the result of the node. - std::pair<SDOperand, SDOperand> &Entry = ExpandedIntegers[Op]; + std::pair<SDValue, SDValue> &Entry = ExpandedIntegers[Op]; assert(Entry.first.Val == 0 && "Node already expanded"); Entry.first = Lo; Entry.second = Hi; } -void DAGTypeLegalizer::GetExpandedFloat(SDOperand Op, SDOperand &Lo, - SDOperand &Hi) { - std::pair<SDOperand, SDOperand> &Entry = ExpandedFloats[Op]; +void DAGTypeLegalizer::GetExpandedFloat(SDValue Op, SDValue &Lo, + SDValue &Hi) { + std::pair<SDValue, SDValue> &Entry = ExpandedFloats[Op]; RemapNode(Entry.first); RemapNode(Entry.second); assert(Entry.first.Val && "Operand isn't expanded"); @@ -495,22 +495,22 @@ void DAGTypeLegalizer::GetExpandedFloat(SDOperand Op, SDOperand &Lo, Hi = Entry.second; } -void DAGTypeLegalizer::SetExpandedFloat(SDOperand Op, SDOperand Lo, - SDOperand Hi) { +void DAGTypeLegalizer::SetExpandedFloat(SDValue Op, SDValue Lo, + SDValue Hi) { // Lo/Hi may have been newly allocated, if so, add nodeid's as relevant. AnalyzeNewNode(Lo.Val); AnalyzeNewNode(Hi.Val); // Remember that this is the result of the node. 
- std::pair<SDOperand, SDOperand> &Entry = ExpandedFloats[Op]; + std::pair<SDValue, SDValue> &Entry = ExpandedFloats[Op]; assert(Entry.first.Val == 0 && "Node already expanded"); Entry.first = Lo; Entry.second = Hi; } -void DAGTypeLegalizer::GetSplitVector(SDOperand Op, SDOperand &Lo, - SDOperand &Hi) { - std::pair<SDOperand, SDOperand> &Entry = SplitVectors[Op]; +void DAGTypeLegalizer::GetSplitVector(SDValue Op, SDValue &Lo, + SDValue &Hi) { + std::pair<SDValue, SDValue> &Entry = SplitVectors[Op]; RemapNode(Entry.first); RemapNode(Entry.second); assert(Entry.first.Val && "Operand isn't split"); @@ -518,14 +518,14 @@ void DAGTypeLegalizer::GetSplitVector(SDOperand Op, SDOperand &Lo, Hi = Entry.second; } -void DAGTypeLegalizer::SetSplitVector(SDOperand Op, SDOperand Lo, - SDOperand Hi) { +void DAGTypeLegalizer::SetSplitVector(SDValue Op, SDValue Lo, + SDValue Hi) { // Lo/Hi may have been newly allocated, if so, add nodeid's as relevant. AnalyzeNewNode(Lo.Val); AnalyzeNewNode(Hi.Val); // Remember that this is the result of the node. - std::pair<SDOperand, SDOperand> &Entry = SplitVectors[Op]; + std::pair<SDValue, SDValue> &Entry = SplitVectors[Op]; assert(Entry.first.Val == 0 && "Node already split"); Entry.first = Lo; Entry.second = Hi; @@ -537,27 +537,27 @@ void DAGTypeLegalizer::SetSplitVector(SDOperand Op, SDOperand Lo, //===----------------------------------------------------------------------===// /// BitConvertToInteger - Convert to an integer of the same size. -SDOperand DAGTypeLegalizer::BitConvertToInteger(SDOperand Op) { +SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) { unsigned BitWidth = Op.getValueType().getSizeInBits(); return DAG.getNode(ISD::BIT_CONVERT, MVT::getIntegerVT(BitWidth), Op); } -SDOperand DAGTypeLegalizer::CreateStackStoreLoad(SDOperand Op, - MVT DestVT) { +SDValue DAGTypeLegalizer::CreateStackStoreLoad(SDValue Op, + MVT DestVT) { // Create the stack frame object. Make sure it is aligned for both // the source and destination types. unsigned SrcAlign = TLI.getTargetData()->getPrefTypeAlignment(Op.getValueType().getTypeForMVT()); - SDOperand FIPtr = DAG.CreateStackTemporary(DestVT, SrcAlign); + SDValue FIPtr = DAG.CreateStackTemporary(DestVT, SrcAlign); // Emit a store to the stack slot. - SDOperand Store = DAG.getStore(DAG.getEntryNode(), Op, FIPtr, NULL, 0); + SDValue Store = DAG.getStore(DAG.getEntryNode(), Op, FIPtr, NULL, 0); // Result is a load from the stack slot. return DAG.getLoad(DestVT, Store, FIPtr, NULL, 0); } /// JoinIntegers - Build an integer with low bits Lo and high bits Hi. -SDOperand DAGTypeLegalizer::JoinIntegers(SDOperand Lo, SDOperand Hi) { +SDValue DAGTypeLegalizer::JoinIntegers(SDValue Lo, SDValue Hi) { MVT LVT = Lo.getValueType(); MVT HVT = Hi.getValueType(); MVT NVT = MVT::getIntegerVT(LVT.getSizeInBits() + HVT.getSizeInBits()); @@ -571,9 +571,9 @@ SDOperand DAGTypeLegalizer::JoinIntegers(SDOperand Lo, SDOperand Hi) { /// SplitInteger - Return the lower LoVT bits of Op in Lo and the upper HiVT /// bits in Hi. 
-void DAGTypeLegalizer::SplitInteger(SDOperand Op, +void DAGTypeLegalizer::SplitInteger(SDValue Op, MVT LoVT, MVT HiVT, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { assert(LoVT.getSizeInBits() + HiVT.getSizeInBits() == Op.getValueType().getSizeInBits() && "Invalid integer splitting!"); Lo = DAG.getNode(ISD::TRUNCATE, LoVT, Op); @@ -585,17 +585,17 @@ void DAGTypeLegalizer::SplitInteger(SDOperand Op, /// SplitInteger - Return the lower and upper halves of Op's bits in a value type /// half the size of Op's. -void DAGTypeLegalizer::SplitInteger(SDOperand Op, - SDOperand &Lo, SDOperand &Hi) { +void DAGTypeLegalizer::SplitInteger(SDValue Op, + SDValue &Lo, SDValue &Hi) { MVT HalfVT = MVT::getIntegerVT(Op.getValueType().getSizeInBits()/2); SplitInteger(Op, HalfVT, HalfVT, Lo, Hi); } /// MakeLibCall - Generate a libcall taking the given operands as arguments and /// returning a result of type RetVT. -SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT, - const SDOperand *Ops, unsigned NumOps, - bool isSigned) { +SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT, + const SDValue *Ops, unsigned NumOps, + bool isSigned) { TargetLowering::ArgListTy Args; Args.reserve(NumOps); @@ -607,18 +607,18 @@ SDOperand DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, MVT RetVT, Entry.isZExt = !isSigned; Args.push_back(Entry); } - SDOperand Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), + SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC), TLI.getPointerTy()); const Type *RetTy = RetVT.getTypeForMVT(); - std::pair<SDOperand,SDOperand> CallInfo = + std::pair<SDValue,SDValue> CallInfo = TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false, CallingConv::C, false, Callee, Args, DAG); return CallInfo.first; } -SDOperand DAGTypeLegalizer::GetVectorElementPointer(SDOperand VecPtr, MVT EltVT, - SDOperand Index) { +SDValue DAGTypeLegalizer::GetVectorElementPointer(SDValue VecPtr, MVT EltVT, + SDValue Index) { // Make sure the index type is big enough to compute in. if (Index.getValueType().bitsGT(TLI.getPointerTy())) Index = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), Index); diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h index c396ffa0ca..d701c6b25e 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -112,31 +112,31 @@ private: /// PromotedIntegers - For integer nodes that are below legal width, this map /// indicates what promoted value to use. - DenseMap<SDOperand, SDOperand> PromotedIntegers; + DenseMap<SDValue, SDValue> PromotedIntegers; /// ExpandedIntegers - For integer nodes that need to be expanded this map /// indicates which operands are the expanded version of the input. - DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > ExpandedIntegers; + DenseMap<SDValue, std::pair<SDValue, SDValue> > ExpandedIntegers; /// SoftenedFloats - For floating point nodes converted to integers of /// the same size, this map indicates the converted value to use. - DenseMap<SDOperand, SDOperand> SoftenedFloats; + DenseMap<SDValue, SDValue> SoftenedFloats; /// ExpandedFloats - For float nodes that need to be expanded this map /// indicates which operands are the expanded version of the input. - DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > ExpandedFloats; + DenseMap<SDValue, std::pair<SDValue, SDValue> > ExpandedFloats; /// ScalarizedVectors - For nodes that are <1 x ty>, this map indicates the /// scalar value of type 'ty' to use. 
- DenseMap<SDOperand, SDOperand> ScalarizedVectors; + DenseMap<SDValue, SDValue> ScalarizedVectors; /// SplitVectors - For nodes that need to be split this map indicates /// which operands are the expanded version of the input. - DenseMap<SDOperand, std::pair<SDOperand, SDOperand> > SplitVectors; + DenseMap<SDValue, std::pair<SDValue, SDValue> > SplitVectors; /// ReplacedNodes - For nodes that have been replaced with another, /// indicates the replacement node to use. - DenseMap<SDOperand, SDOperand> ReplacedNodes; + DenseMap<SDValue, SDValue> ReplacedNodes; /// Worklist - This defines a worklist of nodes to process. In order to be /// pushed onto this worklist, all operands of a node must have already been @@ -164,47 +164,47 @@ public: ExpungeNode(Old); ExpungeNode(New); for (unsigned i = 0, e = Old->getNumValues(); i != e; ++i) - ReplacedNodes[SDOperand(Old, i)] = SDOperand(New, i); + ReplacedNodes[SDValue(Old, i)] = SDValue(New, i); } private: void AnalyzeNewNode(SDNode *&N); - void ReplaceValueWith(SDOperand From, SDOperand To); + void ReplaceValueWith(SDValue From, SDValue To); void ReplaceNodeWith(SDNode *From, SDNode *To); - void RemapNode(SDOperand &N); + void RemapNode(SDValue &N); void ExpungeNode(SDNode *N); // Common routines. - SDOperand CreateStackStoreLoad(SDOperand Op, MVT DestVT); - SDOperand MakeLibCall(RTLIB::Libcall LC, MVT RetVT, - const SDOperand *Ops, unsigned NumOps, bool isSigned); + SDValue CreateStackStoreLoad(SDValue Op, MVT DestVT); + SDValue MakeLibCall(RTLIB::Libcall LC, MVT RetVT, + const SDValue *Ops, unsigned NumOps, bool isSigned); - SDOperand BitConvertToInteger(SDOperand Op); - SDOperand JoinIntegers(SDOperand Lo, SDOperand Hi); - void SplitInteger(SDOperand Op, SDOperand &Lo, SDOperand &Hi); - void SplitInteger(SDOperand Op, MVT LoVT, MVT HiVT, - SDOperand &Lo, SDOperand &Hi); + SDValue BitConvertToInteger(SDValue Op); + SDValue JoinIntegers(SDValue Lo, SDValue Hi); + void SplitInteger(SDValue Op, SDValue &Lo, SDValue &Hi); + void SplitInteger(SDValue Op, MVT LoVT, MVT HiVT, + SDValue &Lo, SDValue &Hi); - SDOperand GetVectorElementPointer(SDOperand VecPtr, MVT EltVT, - SDOperand Index); + SDValue GetVectorElementPointer(SDValue VecPtr, MVT EltVT, + SDValue Index); //===--------------------------------------------------------------------===// // Integer Promotion Support: LegalizeIntegerTypes.cpp //===--------------------------------------------------------------------===// - SDOperand GetPromotedInteger(SDOperand Op) { - SDOperand &PromotedOp = PromotedIntegers[Op]; + SDValue GetPromotedInteger(SDValue Op) { + SDValue &PromotedOp = PromotedIntegers[Op]; RemapNode(PromotedOp); assert(PromotedOp.Val && "Operand wasn't promoted?"); return PromotedOp; } - void SetPromotedInteger(SDOperand Op, SDOperand Result); + void SetPromotedInteger(SDValue Op, SDValue Result); /// ZExtPromotedInteger - Get a promoted operand and zero extend it to the /// final size. - SDOperand ZExtPromotedInteger(SDOperand Op) { + SDValue ZExtPromotedInteger(SDValue Op) { MVT OldVT = Op.getValueType(); Op = GetPromotedInteger(Op); return DAG.getZeroExtendInReg(Op, OldVT); @@ -212,251 +212,251 @@ private: // Integer Result Promotion. 
void PromoteIntegerResult(SDNode *N, unsigned ResNo); - SDOperand PromoteIntRes_AssertSext(SDNode *N); - SDOperand PromoteIntRes_AssertZext(SDNode *N); - SDOperand PromoteIntRes_BIT_CONVERT(SDNode *N); - SDOperand PromoteIntRes_BSWAP(SDNode *N); - SDOperand PromoteIntRes_BUILD_PAIR(SDNode *N); - SDOperand PromoteIntRes_Constant(SDNode *N); - SDOperand PromoteIntRes_CTLZ(SDNode *N); - SDOperand PromoteIntRes_CTPOP(SDNode *N); - SDOperand PromoteIntRes_CTTZ(SDNode *N); - SDOperand PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N); - SDOperand PromoteIntRes_FP_TO_XINT(SDNode *N); - SDOperand PromoteIntRes_INT_EXTEND(SDNode *N); - SDOperand PromoteIntRes_LOAD(LoadSDNode *N); - SDOperand PromoteIntRes_SDIV(SDNode *N); - SDOperand PromoteIntRes_SELECT (SDNode *N); - SDOperand PromoteIntRes_SELECT_CC(SDNode *N); - SDOperand PromoteIntRes_SETCC(SDNode *N); - SDOperand PromoteIntRes_SHL(SDNode *N); - SDOperand PromoteIntRes_SimpleIntBinOp(SDNode *N); - SDOperand PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N); - SDOperand PromoteIntRes_SRA(SDNode *N); - SDOperand PromoteIntRes_SRL(SDNode *N); - SDOperand PromoteIntRes_TRUNCATE(SDNode *N); - SDOperand PromoteIntRes_UDIV(SDNode *N); - SDOperand PromoteIntRes_UNDEF(SDNode *N); - SDOperand PromoteIntRes_VAARG(SDNode *N); + SDValue PromoteIntRes_AssertSext(SDNode *N); + SDValue PromoteIntRes_AssertZext(SDNode *N); + SDValue PromoteIntRes_BIT_CONVERT(SDNode *N); + SDValue PromoteIntRes_BSWAP(SDNode *N); + SDValue PromoteIntRes_BUILD_PAIR(SDNode *N); + SDValue PromoteIntRes_Constant(SDNode *N); + SDValue PromoteIntRes_CTLZ(SDNode *N); + SDValue PromoteIntRes_CTPOP(SDNode *N); + SDValue PromoteIntRes_CTTZ(SDNode *N); + SDValue PromoteIntRes_EXTRACT_VECTOR_ELT(SDNode *N); + SDValue PromoteIntRes_FP_TO_XINT(SDNode *N); + SDValue PromoteIntRes_INT_EXTEND(SDNode *N); + SDValue PromoteIntRes_LOAD(LoadSDNode *N); + SDValue PromoteIntRes_SDIV(SDNode *N); + SDValue PromoteIntRes_SELECT (SDNode *N); + SDValue PromoteIntRes_SELECT_CC(SDNode *N); + SDValue PromoteIntRes_SETCC(SDNode *N); + SDValue PromoteIntRes_SHL(SDNode *N); + SDValue PromoteIntRes_SimpleIntBinOp(SDNode *N); + SDValue PromoteIntRes_SIGN_EXTEND_INREG(SDNode *N); + SDValue PromoteIntRes_SRA(SDNode *N); + SDValue PromoteIntRes_SRL(SDNode *N); + SDValue PromoteIntRes_TRUNCATE(SDNode *N); + SDValue PromoteIntRes_UDIV(SDNode *N); + SDValue PromoteIntRes_UNDEF(SDNode *N); + SDValue PromoteIntRes_VAARG(SDNode *N); // Integer Operand Promotion. 
bool PromoteIntegerOperand(SDNode *N, unsigned OperandNo); - SDOperand PromoteIntOp_ANY_EXTEND(SDNode *N); - SDOperand PromoteIntOp_BUILD_PAIR(SDNode *N); - SDOperand PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo); - SDOperand PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo); - SDOperand PromoteIntOp_BUILD_VECTOR(SDNode *N); - SDOperand PromoteIntOp_FP_EXTEND(SDNode *N); - SDOperand PromoteIntOp_FP_ROUND(SDNode *N); - SDOperand PromoteIntOp_INT_TO_FP(SDNode *N); - SDOperand PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, unsigned OpNo); - SDOperand PromoteIntOp_MEMBARRIER(SDNode *N); - SDOperand PromoteIntOp_SELECT(SDNode *N, unsigned OpNo); - SDOperand PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo); - SDOperand PromoteIntOp_SETCC(SDNode *N, unsigned OpNo); - SDOperand PromoteIntOp_SIGN_EXTEND(SDNode *N); - SDOperand PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo); - SDOperand PromoteIntOp_TRUNCATE(SDNode *N); - SDOperand PromoteIntOp_ZERO_EXTEND(SDNode *N); - - void PromoteSetCCOperands(SDOperand &LHS,SDOperand &RHS, ISD::CondCode Code); + SDValue PromoteIntOp_ANY_EXTEND(SDNode *N); + SDValue PromoteIntOp_BUILD_PAIR(SDNode *N); + SDValue PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo); + SDValue PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo); + SDValue PromoteIntOp_BUILD_VECTOR(SDNode *N); + SDValue PromoteIntOp_FP_EXTEND(SDNode *N); + SDValue PromoteIntOp_FP_ROUND(SDNode *N); + SDValue PromoteIntOp_INT_TO_FP(SDNode *N); + SDValue PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N, unsigned OpNo); + SDValue PromoteIntOp_MEMBARRIER(SDNode *N); + SDValue PromoteIntOp_SELECT(SDNode *N, unsigned OpNo); + SDValue PromoteIntOp_SELECT_CC(SDNode *N, unsigned OpNo); + SDValue PromoteIntOp_SETCC(SDNode *N, unsigned OpNo); + SDValue PromoteIntOp_SIGN_EXTEND(SDNode *N); + SDValue PromoteIntOp_STORE(StoreSDNode *N, unsigned OpNo); + SDValue PromoteIntOp_TRUNCATE(SDNode *N); + SDValue PromoteIntOp_ZERO_EXTEND(SDNode *N); + + void PromoteSetCCOperands(SDValue &LHS,SDValue &RHS, ISD::CondCode Code); //===--------------------------------------------------------------------===// // Integer Expansion Support: LegalizeIntegerTypes.cpp //===--------------------------------------------------------------------===// - void GetExpandedInteger(SDOperand Op, SDOperand &Lo, SDOperand &Hi); - void SetExpandedInteger(SDOperand Op, SDOperand Lo, SDOperand Hi); + void GetExpandedInteger(SDValue Op, SDValue &Lo, SDValue &Hi); + void SetExpandedInteger(SDValue Op, SDValue Lo, SDValue Hi); // Integer Result Expansion. 
   void ExpandIntegerResult(SDNode *N, unsigned ResNo);
-  void ExpandIntRes_ANY_EXTEND (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_AssertSext (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_AssertZext (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_Constant (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_CTLZ (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_CTPOP (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_CTTZ (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_LOAD (LoadSDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_SIGN_EXTEND (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_SIGN_EXTEND_INREG (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_TRUNCATE (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_ZERO_EXTEND (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_FP_TO_SINT (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_FP_TO_UINT (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-
-  void ExpandIntRes_Logical (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_ADDSUB (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_ADDSUBC (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_ADDSUBE (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_BSWAP (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_MUL (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_SDIV (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_SREM (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_UDIV (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_UREM (SDNode *N, SDOperand &Lo, SDOperand &Hi);
-  void ExpandIntRes_Shift (SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  void ExpandIntRes_ANY_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_AssertSext (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_AssertZext (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_Constant (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_CTLZ (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_CTPOP (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_CTTZ (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_LOAD (LoadSDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_SIGN_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_SIGN_EXTEND_INREG (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_TRUNCATE (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_ZERO_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_FP_TO_SINT (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_FP_TO_UINT (SDNode *N, SDValue &Lo, SDValue &Hi);
+
+  void ExpandIntRes_Logical (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_ADDSUB (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_ADDSUBC (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_ADDSUBE (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_BSWAP (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_MUL (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_SDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_SREM (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_UDIV (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_UREM (SDNode *N, SDValue &Lo, SDValue &Hi);
+  void ExpandIntRes_Shift (SDNode *N, SDValue &Lo, SDValue &Hi);
   void ExpandShiftByConstant(SDNode *N, unsigned Amt,
-                             SDOperand &Lo, SDOperand &Hi);
+                             SDValue &Lo, SDValue &Hi);
-  bool ExpandShiftWithKnownAmountBit(SDNode *N, SDOperand &Lo, SDOperand &Hi);
+  bool ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi);

   // Integer Operand Expansion.
   bool ExpandIntegerOperand(SDNode *N, unsigned OperandNo);
-  SDOperand ExpandIntOp_BIT_CONVERT(SDNode *N);
-  SDOperand ExpandIntOp_BR_CC(SDNode *N);
-  SDOperand ExpandIntOp_BUILD_VECTOR(SDNode *N);
-  SDOperand ExpandIntOp_EXTRACT_ELEMENT(SDNode *N);
-  SDOperand ExpandIntOp_SELECT_CC(SDNode *N);
-  SDOperand ExpandIntOp_SETCC(SDNode *N);
-  SDOperand ExpandIntOp_SINT_TO_FP(SDNode *N);
-  SDOperand ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo);
-  SDOperand ExpandIntOp_TRUNCATE(SDNode *N);
-  SDOperand ExpandIntOp_UINT_TO_FP(SDNode *N);
-
-  void IntegerExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS,
+  SDValue ExpandIntOp_BIT_CONVERT(SDNode *N);
+  SDValue ExpandIntOp_BR_CC(SDNode *N);
+  SDValue ExpandIntOp_BUILD_VECTOR(SDNode *N);
+  SDValue ExpandIntOp_EXTRACT_ELEMENT(SDNode *N);
+  SDValue ExpandIntOp_SELECT_CC(SDNode *N);
+  SDValue ExpandIntOp_SETCC(SDNode *N);
+  SDValue ExpandIntOp_SINT_TO_FP(SDNode *N);
+  SDValue ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo);
+  SDValue ExpandIntOp_TRUNCATE(SDNode *N);
+  SDValue ExpandIntOp_UINT_TO_FP(SDNode *N);
+
+  void IntegerExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
                                   ISD::CondCode &CCCode);

   //===--------------------------------------------------------------------===//
   // Float to Integer Conversion Support: LegalizeFloatTypes.cpp
   //===--------------------------------------------------------------------===//

-  SDOperand GetSoftenedFloat(SDOperand Op) {
-    SDOperand &SoftenedOp = SoftenedFloats[Op];
+  SDValue GetSoftenedFloat(SDValue Op) {
+    SDValue &SoftenedOp = SoftenedFloats[Op];
     RemapNode(SoftenedOp);
     assert(SoftenedOp.Val && "Operand wasn't converted to integer?");
     return SoftenedOp;
   }
-  void SetSoftenedFloat(SDOperand Op, SDOperand Result);
+  void SetSoftenedFloat(SDValue Op, SDValue Result);

   // Result Float to Integer Conversion.
void SoftenFloatResult(SDNode *N, unsigned OpNo); - SDOperand SoftenFloatRes_BIT_CONVERT(SDNode *N); - SDOperand SoftenFloatRes_BUILD_PAIR(SDNode *N); - SDOperand SoftenFloatRes_ConstantFP(ConstantFPSDNode *N); - SDOperand SoftenFloatRes_FADD(SDNode *N); - SDOperand SoftenFloatRes_FCOPYSIGN(SDNode *N); - SDOperand SoftenFloatRes_FDIV(SDNode *N); - SDOperand SoftenFloatRes_FMUL(SDNode *N); - SDOperand SoftenFloatRes_FP_EXTEND(SDNode *N); - SDOperand SoftenFloatRes_FP_ROUND(SDNode *N); - SDOperand SoftenFloatRes_FPOWI(SDNode *N); - SDOperand SoftenFloatRes_FSUB(SDNode *N); - SDOperand SoftenFloatRes_LOAD(SDNode *N); - SDOperand SoftenFloatRes_SELECT(SDNode *N); - SDOperand SoftenFloatRes_SELECT_CC(SDNode *N); - SDOperand SoftenFloatRes_SINT_TO_FP(SDNode *N); - SDOperand SoftenFloatRes_UINT_TO_FP(SDNode *N); + SDValue SoftenFloatRes_BIT_CONVERT(SDNode *N); + SDValue SoftenFloatRes_BUILD_PAIR(SDNode *N); + SDValue SoftenFloatRes_ConstantFP(ConstantFPSDNode *N); + SDValue SoftenFloatRes_FADD(SDNode *N); + SDValue SoftenFloatRes_FCOPYSIGN(SDNode *N); + SDValue SoftenFloatRes_FDIV(SDNode *N); + SDValue SoftenFloatRes_FMUL(SDNode *N); + SDValue SoftenFloatRes_FP_EXTEND(SDNode *N); + SDValue SoftenFloatRes_FP_ROUND(SDNode *N); + SDValue SoftenFloatRes_FPOWI(SDNode *N); + SDValue SoftenFloatRes_FSUB(SDNode *N); + SDValue SoftenFloatRes_LOAD(SDNode *N); + SDValue SoftenFloatRes_SELECT(SDNode *N); + SDValue SoftenFloatRes_SELECT_CC(SDNode *N); + SDValue SoftenFloatRes_SINT_TO_FP(SDNode *N); + SDValue SoftenFloatRes_UINT_TO_FP(SDNode *N); // Operand Float to Integer Conversion. bool SoftenFloatOperand(SDNode *N, unsigned OpNo); - SDOperand SoftenFloatOp_BIT_CONVERT(SDNode *N); - SDOperand SoftenFloatOp_BR_CC(SDNode *N); - SDOperand SoftenFloatOp_FP_TO_SINT(SDNode *N); - SDOperand SoftenFloatOp_FP_TO_UINT(SDNode *N); - SDOperand SoftenFloatOp_SELECT_CC(SDNode *N); - SDOperand SoftenFloatOp_SETCC(SDNode *N); - SDOperand SoftenFloatOp_STORE(SDNode *N, unsigned OpNo); - - void SoftenSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS, + SDValue SoftenFloatOp_BIT_CONVERT(SDNode *N); + SDValue SoftenFloatOp_BR_CC(SDNode *N); + SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N); + SDValue SoftenFloatOp_FP_TO_UINT(SDNode *N); + SDValue SoftenFloatOp_SELECT_CC(SDNode *N); + SDValue SoftenFloatOp_SETCC(SDNode *N); + SDValue SoftenFloatOp_STORE(SDNode *N, unsigned OpNo); + + void SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode); //===--------------------------------------------------------------------===// // Float Expansion Support: LegalizeFloatTypes.cpp //===--------------------------------------------------------------------===// - void GetExpandedFloat(SDOperand Op, SDOperand &Lo, SDOperand &Hi); - void SetExpandedFloat(SDOperand Op, SDOperand Lo, SDOperand Hi); + void GetExpandedFloat(SDValue Op, SDValue &Lo, SDValue &Hi); + void SetExpandedFloat(SDValue Op, SDValue Lo, SDValue Hi); // Float Result Expansion. 
void ExpandFloatResult(SDNode *N, unsigned ResNo); - void ExpandFloatRes_ConstantFP(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_FABS (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_FADD (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_FDIV (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_FMUL (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_FNEG (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_FP_EXTEND (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_FSUB (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_LOAD (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandFloatRes_XINT_TO_FP(SDNode *N, SDOperand &Lo, SDOperand &Hi); + void ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FABS (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FADD (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FDIV (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FMUL (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FNEG (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FP_EXTEND (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_FSUB (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_LOAD (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo, SDValue &Hi); // Float Operand Expansion. bool ExpandFloatOperand(SDNode *N, unsigned OperandNo); - SDOperand ExpandFloatOp_BR_CC(SDNode *N); - SDOperand ExpandFloatOp_FP_ROUND(SDNode *N); - SDOperand ExpandFloatOp_FP_TO_SINT(SDNode *N); - SDOperand ExpandFloatOp_FP_TO_UINT(SDNode *N); - SDOperand ExpandFloatOp_SELECT_CC(SDNode *N); - SDOperand ExpandFloatOp_SETCC(SDNode *N); - SDOperand ExpandFloatOp_STORE(SDNode *N, unsigned OpNo); - - void FloatExpandSetCCOperands(SDOperand &NewLHS, SDOperand &NewRHS, + SDValue ExpandFloatOp_BR_CC(SDNode *N); + SDValue ExpandFloatOp_FP_ROUND(SDNode *N); + SDValue ExpandFloatOp_FP_TO_SINT(SDNode *N); + SDValue ExpandFloatOp_FP_TO_UINT(SDNode *N); + SDValue ExpandFloatOp_SELECT_CC(SDNode *N); + SDValue ExpandFloatOp_SETCC(SDNode *N); + SDValue ExpandFloatOp_STORE(SDNode *N, unsigned OpNo); + + void FloatExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode); //===--------------------------------------------------------------------===// // Scalarization Support: LegalizeVectorTypes.cpp //===--------------------------------------------------------------------===// - SDOperand GetScalarizedVector(SDOperand Op) { - SDOperand &ScalarizedOp = ScalarizedVectors[Op]; + SDValue GetScalarizedVector(SDValue Op) { + SDValue &ScalarizedOp = ScalarizedVectors[Op]; RemapNode(ScalarizedOp); assert(ScalarizedOp.Val && "Operand wasn't scalarized?"); return ScalarizedOp; } - void SetScalarizedVector(SDOperand Op, SDOperand Result); + void SetScalarizedVector(SDValue Op, SDValue Result); // Vector Result Scalarization: <1 x ty> -> ty. 
void ScalarizeVectorResult(SDNode *N, unsigned OpNo); - SDOperand ScalarizeVecRes_BinOp(SDNode *N); - SDOperand ScalarizeVecRes_UnaryOp(SDNode *N); - - SDOperand ScalarizeVecRes_BIT_CONVERT(SDNode *N); - SDOperand ScalarizeVecRes_FPOWI(SDNode *N); - SDOperand ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N); - SDOperand ScalarizeVecRes_LOAD(LoadSDNode *N); - SDOperand ScalarizeVecRes_SELECT(SDNode *N); - SDOperand ScalarizeVecRes_UNDEF(SDNode *N); - SDOperand ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N); - SDOperand ScalarizeVecRes_VSETCC(SDNode *N); + SDValue ScalarizeVecRes_BinOp(SDNode *N); + SDValue ScalarizeVecRes_UnaryOp(SDNode *N); + + SDValue ScalarizeVecRes_BIT_CONVERT(SDNode *N); + SDValue ScalarizeVecRes_FPOWI(SDNode *N); + SDValue ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N); + SDValue ScalarizeVecRes_LOAD(LoadSDNode *N); + SDValue ScalarizeVecRes_SELECT(SDNode *N); + SDValue ScalarizeVecRes_UNDEF(SDNode *N); + SDValue ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N); + SDValue ScalarizeVecRes_VSETCC(SDNode *N); // Vector Operand Scalarization: <1 x ty> -> ty. bool ScalarizeVectorOperand(SDNode *N, unsigned OpNo); - SDOperand ScalarizeVecOp_BIT_CONVERT(SDNode *N); - SDOperand ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N); - SDOperand ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo); + SDValue ScalarizeVecOp_BIT_CONVERT(SDNode *N); + SDValue ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N); + SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo); //===--------------------------------------------------------------------===// // Vector Splitting Support: LegalizeVectorTypes.cpp //===--------------------------------------------------------------------===// - void GetSplitVector(SDOperand Op, SDOperand &Lo, SDOperand &Hi); - void SetSplitVector(SDOperand Op, SDOperand Lo, SDOperand Hi); + void GetSplitVector(SDValue Op, SDValue &Lo, SDValue &Hi); + void SetSplitVector(SDValue Op, SDValue Lo, SDValue Hi); // Vector Result Splitting: <128 x ty> -> 2 x <64 x ty>. 
void SplitVectorResult(SDNode *N, unsigned OpNo); - void SplitVecRes_BinOp(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_UnaryOp(SDNode *N, SDOperand &Lo, SDOperand &Hi); - - void SplitVecRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_BUILD_PAIR(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_BUILD_VECTOR(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_FPOWI(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_LOAD(LoadSDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitVecRes_VSETCC(SDNode *N, SDOperand &Lo, SDOperand &Hi); + void SplitVecRes_BinOp(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi); + + void SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_BUILD_PAIR(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_FPOWI(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_LOAD(LoadSDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitVecRes_VSETCC(SDNode *N, SDValue &Lo, SDValue &Hi); // Vector Operand Splitting: <128 x ty> -> 2 x <64 x ty>. bool SplitVectorOperand(SDNode *N, unsigned OpNo); - SDOperand SplitVecOp_BIT_CONVERT(SDNode *N); - SDOperand SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N); - SDOperand SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N); - SDOperand SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo); - SDOperand SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo); + SDValue SplitVecOp_BIT_CONVERT(SDNode *N); + SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N); + SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N); + SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo); + SDValue SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo); //===--------------------------------------------------------------------===// // Generic Splitting: LegalizeTypesGeneric.cpp @@ -466,7 +466,7 @@ private: // not necessarily identical types. As such they can be used for splitting // vectors and expanding integers and floats. - void GetSplitOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi) { + void GetSplitOp(SDValue Op, SDValue &Lo, SDValue &Hi) { if (Op.getValueType().isVector()) GetSplitVector(Op, Lo, Hi); else if (Op.getValueType().isInteger()) @@ -480,10 +480,10 @@ private: void GetSplitDestVTs(MVT InVT, MVT &LoVT, MVT &HiVT); // Generic Result Splitting. 
- void SplitRes_MERGE_VALUES(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitRes_SELECT (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitRes_SELECT_CC (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void SplitRes_UNDEF (SDNode *N, SDOperand &Lo, SDOperand &Hi); + void SplitRes_MERGE_VALUES(SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitRes_SELECT (SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitRes_SELECT_CC (SDNode *N, SDValue &Lo, SDValue &Hi); + void SplitRes_UNDEF (SDNode *N, SDValue &Lo, SDValue &Hi); //===--------------------------------------------------------------------===// // Generic Expansion: LegalizeTypesGeneric.cpp @@ -494,7 +494,7 @@ private: // in memory on little/big-endian machines, followed by the Hi/Lo part. As // such they can be used for expanding integers and floats. - void GetExpandedOp(SDOperand Op, SDOperand &Lo, SDOperand &Hi) { + void GetExpandedOp(SDValue Op, SDValue &Lo, SDValue &Hi) { if (Op.getValueType().isInteger()) GetExpandedInteger(Op, Lo, Hi); else @@ -502,17 +502,17 @@ private: } // Generic Result Expansion. - void ExpandRes_BIT_CONVERT (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandRes_BUILD_PAIR (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandRes_EXTRACT_ELEMENT (SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo, SDOperand &Hi); - void ExpandRes_NormalLoad (SDNode *N, SDOperand &Lo, SDOperand &Hi); + void ExpandRes_BIT_CONVERT (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandRes_BUILD_PAIR (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandRes_EXTRACT_ELEMENT (SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi); + void ExpandRes_NormalLoad (SDNode *N, SDValue &Lo, SDValue &Hi); // Generic Operand Expansion. - SDOperand ExpandOp_BIT_CONVERT (SDNode *N); - SDOperand ExpandOp_BUILD_VECTOR (SDNode *N); - SDOperand ExpandOp_EXTRACT_ELEMENT(SDNode *N); - SDOperand ExpandOp_NormalStore (SDNode *N, unsigned OpNo); + SDValue ExpandOp_BIT_CONVERT (SDNode *N); + SDValue ExpandOp_BUILD_VECTOR (SDNode *N); + SDValue ExpandOp_EXTRACT_ELEMENT(SDNode *N); + SDValue ExpandOp_NormalStore (SDNode *N, unsigned OpNo); }; diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp index 783cc9d213..59b8ab8201 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp @@ -28,10 +28,10 @@ using namespace llvm; // little/big-endian machines, followed by the Hi/Lo part. This means that // they cannot be used as is on vectors, for which Lo is always stored first. -void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo, + SDValue &Hi) { MVT NVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand InOp = N->getOperand(0); + SDValue InOp = N->getOperand(0); MVT InVT = InOp.getValueType(); // Handle some special cases efficiently. @@ -74,21 +74,21 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, } // Lower the bit-convert to a store/load from the stack, then expand the load. 
- SDOperand Op = CreateStackStoreLoad(InOp, N->getValueType(0)); + SDValue Op = CreateStackStoreLoad(InOp, N->getValueType(0)); ExpandRes_NormalLoad(Op.Val, Lo, Hi); } -void DAGTypeLegalizer::ExpandRes_BUILD_PAIR(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandRes_BUILD_PAIR(SDNode *N, SDValue &Lo, + SDValue &Hi) { // Return the operands. Lo = N->getOperand(0); Hi = N->getOperand(1); } -void DAGTypeLegalizer::ExpandRes_EXTRACT_ELEMENT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandRes_EXTRACT_ELEMENT(SDNode *N, SDValue &Lo, + SDValue &Hi) { GetExpandedOp(N->getOperand(0), Lo, Hi); - SDOperand Part = cast<ConstantSDNode>(N->getOperand(1))->getValue() ? Hi : Lo; + SDValue Part = cast<ConstantSDNode>(N->getOperand(1))->getValue() ? Hi : Lo; assert(Part.getValueType() == N->getValueType(0) && "Type twice as big as expanded type not itself expanded!"); @@ -100,9 +100,9 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_ELEMENT(SDNode *N, SDOperand &Lo, DAG.getConstant(1, TLI.getPointerTy())); } -void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand OldVec = N->getOperand(0); +void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue OldVec = N->getOperand(0); unsigned OldElts = OldVec.getValueType().getVectorNumElements(); // Convert to a vector of the expanded element type, for example @@ -110,12 +110,12 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo, MVT OldVT = N->getValueType(0); MVT NewVT = TLI.getTypeToTransformTo(OldVT); - SDOperand NewVec = DAG.getNode(ISD::BIT_CONVERT, + SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, MVT::getVectorVT(NewVT, 2*OldElts), OldVec); // Extract the elements at 2 * Idx and 2 * Idx + 1 from the new vector. - SDOperand Idx = N->getOperand(1); + SDValue Idx = N->getOperand(1); // Make sure the type of Idx is big enough to hold the new values. if (Idx.getValueType().bitsLT(TLI.getPointerTy())) @@ -132,14 +132,14 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDOperand &Lo, std::swap(Lo, Hi); } -void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo, + SDValue &Hi) { assert(ISD::isNormalLoad(N) && "This routine only for normal loads!"); LoadSDNode *LD = cast<LoadSDNode>(N); MVT NVT = TLI.getTypeToTransformTo(LD->getValueType(0)); - SDOperand Chain = LD->getChain(); - SDOperand Ptr = LD->getBasePtr(); + SDValue Chain = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); int SVOffset = LD->getSrcValueOffset(); unsigned Alignment = LD->getAlignment(); bool isVolatile = LD->isVolatile(); @@ -167,7 +167,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDOperand &Lo, // Modified the chain - switch anything that used the old chain to use // the new one. - ReplaceValueWith(SDOperand(N, 1), Chain); + ReplaceValueWith(SDValue(N, 1), Chain); } @@ -175,7 +175,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDOperand &Lo, // Generic Operand Expansion. //===--------------------------------------------------------------------===// -SDOperand DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) { +SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) { if (N->getValueType(0).isVector()) { // An illegal expanding type is being converted to a legal vector type. 
// Make a two element vector out of the expanded parts and convert that @@ -186,13 +186,13 @@ SDOperand DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) { MVT NVT = MVT::getVectorVT(TLI.getTypeToTransformTo(OVT), 2); if (isTypeLegal(NVT)) { - SDOperand Parts[2]; + SDValue Parts[2]; GetExpandedOp(N->getOperand(0), Parts[0], Parts[1]); if (TLI.isBigEndian()) std::swap(Parts[0], Parts[1]); - SDOperand Vec = DAG.getNode(ISD::BUILD_VECTOR, NVT, Parts, 2); + SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, NVT, Parts, 2); return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Vec); } } @@ -201,7 +201,7 @@ SDOperand DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) { return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0)); } -SDOperand DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) { +SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) { // The vector type is legal but the element type needs expansion. MVT VecVT = N->getValueType(0); unsigned NumElts = VecVT.getVectorNumElements(); @@ -210,11 +210,11 @@ SDOperand DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) { // Build a vector of twice the length out of the expanded elements. // For example <3 x i64> -> <6 x i32>. - std::vector<SDOperand> NewElts; + std::vector<SDValue> NewElts; NewElts.reserve(NumElts*2); for (unsigned i = 0; i < NumElts; ++i) { - SDOperand Lo, Hi; + SDValue Lo, Hi; GetExpandedOp(N->getOperand(i), Lo, Hi); if (TLI.isBigEndian()) std::swap(Lo, Hi); @@ -222,7 +222,7 @@ SDOperand DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) { NewElts.push_back(Hi); } - SDOperand NewVec = DAG.getNode(ISD::BUILD_VECTOR, + SDValue NewVec = DAG.getNode(ISD::BUILD_VECTOR, MVT::getVectorVT(NewVT, NewElts.size()), &NewElts[0], NewElts.size()); @@ -230,20 +230,20 @@ SDOperand DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) { return DAG.getNode(ISD::BIT_CONVERT, VecVT, NewVec); } -SDOperand DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) { - SDOperand Lo, Hi; +SDValue DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) { + SDValue Lo, Hi; GetExpandedOp(N->getOperand(0), Lo, Hi); return cast<ConstantSDNode>(N->getOperand(1))->getValue() ? Hi : Lo; } -SDOperand DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) { assert(ISD::isNormalStore(N) && "This routine only for normal stores!"); assert(OpNo == 1 && "Can only expand the stored value so far"); StoreSDNode *St = cast<StoreSDNode>(N); MVT NVT = TLI.getTypeToTransformTo(St->getValue().getValueType()); - SDOperand Chain = St->getChain(); - SDOperand Ptr = St->getBasePtr(); + SDValue Chain = St->getChain(); + SDValue Ptr = St->getBasePtr(); int SVOffset = St->getSrcValueOffset(); unsigned Alignment = St->getAlignment(); bool isVolatile = St->isVolatile(); @@ -251,7 +251,7 @@ SDOperand DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) { assert(NVT.isByteSized() && "Expanded type not byte sized!"); unsigned IncrementSize = NVT.getSizeInBits() / 8; - SDOperand Lo, Hi; + SDValue Lo, Hi; GetExpandedOp(St->getValue(), Lo, Hi); if (TLI.isBigEndian()) @@ -280,7 +280,7 @@ SDOperand DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) { // little-endian). void DAGTypeLegalizer::SplitRes_MERGE_VALUES(SDNode *N, - SDOperand &Lo, SDOperand &Hi) { + SDValue &Lo, SDValue &Hi) { // A MERGE_VALUES node can produce any number of values. We know that the // first illegal one needs to be expanded into Lo/Hi. 
unsigned i; @@ -288,7 +288,7 @@ void DAGTypeLegalizer::SplitRes_MERGE_VALUES(SDNode *N, // The string of legal results gets turns into the input operands, which have // the same type. for (i = 0; isTypeLegal(N->getValueType(i)); ++i) - ReplaceValueWith(SDOperand(N, i), SDOperand(N->getOperand(i))); + ReplaceValueWith(SDValue(N, i), SDValue(N->getOperand(i))); // The first illegal result must be the one that needs to be expanded. GetSplitOp(N->getOperand(i), Lo, Hi); @@ -297,23 +297,23 @@ void DAGTypeLegalizer::SplitRes_MERGE_VALUES(SDNode *N, // legal or not. unsigned e = N->getNumValues(); for (++i; i != e; ++i) - ReplaceValueWith(SDOperand(N, i), SDOperand(N->getOperand(i))); + ReplaceValueWith(SDValue(N, i), SDValue(N->getOperand(i))); } -void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand LL, LH, RL, RH; +void DAGTypeLegalizer::SplitRes_SELECT(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue LL, LH, RL, RH; GetSplitOp(N->getOperand(1), LL, LH); GetSplitOp(N->getOperand(2), RL, RH); - SDOperand Cond = N->getOperand(0); + SDValue Cond = N->getOperand(0); Lo = DAG.getNode(ISD::SELECT, LL.getValueType(), Cond, LL, RL); Hi = DAG.getNode(ISD::SELECT, LH.getValueType(), Cond, LH, RH); } -void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand LL, LH, RL, RH; +void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue LL, LH, RL, RH; GetSplitOp(N->getOperand(2), LL, LH); GetSplitOp(N->getOperand(3), RL, RH); @@ -323,7 +323,7 @@ void DAGTypeLegalizer::SplitRes_SELECT_CC(SDNode *N, SDOperand &Lo, N->getOperand(1), LH, RH, N->getOperand(4)); } -void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDOperand &Lo, SDOperand &Hi) { +void DAGTypeLegalizer::SplitRes_UNDEF(SDNode *N, SDValue &Lo, SDValue &Hi) { MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); Lo = DAG.getNode(ISD::UNDEF, LoVT); diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index b66d56b3a9..e0e40e309b 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -30,7 +30,7 @@ using namespace llvm; void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) { DEBUG(cerr << "Scalarize node result " << ResNo << ": "; N->dump(&DAG); cerr << "\n"); - SDOperand R = SDOperand(); + SDValue R = SDValue(); switch (N->getOpcode()) { default: @@ -84,29 +84,29 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) { // If R is null, the sub-method took care of registering the result. 
if (R.Val) - SetScalarizedVector(SDOperand(N, ResNo), R); + SetScalarizedVector(SDValue(N, ResNo), R); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) { - SDOperand LHS = GetScalarizedVector(N->getOperand(0)); - SDOperand RHS = GetScalarizedVector(N->getOperand(1)); +SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) { + SDValue LHS = GetScalarizedVector(N->getOperand(0)); + SDValue RHS = GetScalarizedVector(N->getOperand(1)); return DAG.getNode(N->getOpcode(), LHS.getValueType(), LHS, RHS); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_BIT_CONVERT(SDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecRes_BIT_CONVERT(SDNode *N) { MVT NewVT = N->getValueType(0).getVectorElementType(); return DAG.getNode(ISD::BIT_CONVERT, NewVT, N->getOperand(0)); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_FPOWI(SDNode *N) { - SDOperand Op = GetScalarizedVector(N->getOperand(0)); +SDValue DAGTypeLegalizer::ScalarizeVecRes_FPOWI(SDNode *N) { + SDValue Op = GetScalarizedVector(N->getOperand(0)); return DAG.getNode(ISD::FPOWI, Op.getValueType(), Op, N->getOperand(1)); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) { // The value to insert may have a wider type than the vector element type, // so be sure to truncate it to the element type if necessary. - SDOperand Op = N->getOperand(1); + SDValue Op = N->getOperand(1); MVT EltVT = N->getValueType(0).getVectorElementType(); if (Op.getValueType() != EltVT) // FIXME: Can this happen for floating point types? @@ -114,47 +114,47 @@ SDOperand DAGTypeLegalizer::ScalarizeVecRes_INSERT_VECTOR_ELT(SDNode *N) { return Op; } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecRes_LOAD(LoadSDNode *N) { assert(ISD::isNormalLoad(N) && "Extending load of one-element vector?"); - SDOperand Result = DAG.getLoad(N->getValueType(0).getVectorElementType(), + SDValue Result = DAG.getLoad(N->getValueType(0).getVectorElementType(), N->getChain(), N->getBasePtr(), N->getSrcValue(), N->getSrcValueOffset(), N->isVolatile(), N->getAlignment()); // Legalized the chain result - switch anything that used the old chain to // use the new one. - ReplaceValueWith(SDOperand(N, 1), Result.getValue(1)); + ReplaceValueWith(SDValue(N, 1), Result.getValue(1)); return Result; } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecRes_UnaryOp(SDNode *N) { // Get the dest type - it doesn't always match the input type, e.g. int_to_fp. 
MVT DestVT = TLI.getTypeToTransformTo(N->getValueType(0)); - SDOperand Op = GetScalarizedVector(N->getOperand(0)); + SDValue Op = GetScalarizedVector(N->getOperand(0)); return DAG.getNode(N->getOpcode(), DestVT, Op); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) { return DAG.getNode(ISD::UNDEF, N->getValueType(0).getVectorElementType()); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) { - SDOperand LHS = GetScalarizedVector(N->getOperand(1)); +SDValue DAGTypeLegalizer::ScalarizeVecRes_SELECT(SDNode *N) { + SDValue LHS = GetScalarizedVector(N->getOperand(1)); return DAG.getNode(ISD::SELECT, LHS.getValueType(), N->getOperand(0), LHS, GetScalarizedVector(N->getOperand(2))); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecRes_VECTOR_SHUFFLE(SDNode *N) { // Figure out if the scalar is the LHS or RHS and return it. - SDOperand EltNum = N->getOperand(2).getOperand(0); + SDValue EltNum = N->getOperand(2).getOperand(0); unsigned Op = cast<ConstantSDNode>(EltNum)->getValue() != 0; return GetScalarizedVector(N->getOperand(Op)); } -SDOperand DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) { MVT NewVT = N->getValueType(0).getVectorElementType(); - SDOperand LHS = GetScalarizedVector(N->getOperand(0)); - SDOperand RHS = GetScalarizedVector(N->getOperand(1)); + SDValue LHS = GetScalarizedVector(N->getOperand(0)); + SDValue RHS = GetScalarizedVector(N->getOperand(1)); LHS = DAG.getNode(ISD::SETCC, TLI.getSetCCResultType(LHS), LHS, RHS, N->getOperand(2)); return @@ -172,7 +172,7 @@ SDOperand DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) { bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) { DEBUG(cerr << "Scalarize node operand " << OpNo << ": "; N->dump(&DAG); cerr << "\n"); - SDOperand Res = SDOperand(); + SDValue Res = SDValue(); if (Res.Val == 0) { switch (N->getOpcode()) { @@ -211,27 +211,27 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) { assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && "Invalid operand expansion"); - ReplaceValueWith(SDOperand(N, 0), Res); + ReplaceValueWith(SDValue(N, 0), Res); return false; } /// ScalarizeVecOp_BIT_CONVERT - If the value to convert is a vector that needs /// to be scalarized, it must be <1 x ty>. Convert the element instead. -SDOperand DAGTypeLegalizer::ScalarizeVecOp_BIT_CONVERT(SDNode *N) { - SDOperand Elt = GetScalarizedVector(N->getOperand(0)); +SDValue DAGTypeLegalizer::ScalarizeVecOp_BIT_CONVERT(SDNode *N) { + SDValue Elt = GetScalarizedVector(N->getOperand(0)); return DAG.getNode(ISD::BIT_CONVERT, N->getValueType(0), Elt); } /// ScalarizeVecOp_EXTRACT_VECTOR_ELT - If the input is a vector that needs to /// be scalarized, it must be <1 x ty>, so just return the element, ignoring the /// index. -SDOperand DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) { +SDValue DAGTypeLegalizer::ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N) { return GetScalarizedVector(N->getOperand(0)); } /// ScalarizeVecOp_STORE - If the value to store is a vector that needs to be /// scalarized, it must be <1 x ty>. Just store the element. 
-SDOperand DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){ +SDValue DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){ assert(ISD::isNormalStore(N) && "Truncating store of one-element vector?"); assert(OpNo == 1 && "Do not know how to scalarize this operand!"); return DAG.getStore(N->getChain(), GetScalarizedVector(N->getOperand(1)), @@ -251,7 +251,7 @@ SDOperand DAGTypeLegalizer::ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo){ /// splitting. void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) { DEBUG(cerr << "Split node result: "; N->dump(&DAG); cerr << "\n"); - SDOperand Lo, Hi; + SDValue Lo, Hi; switch (N->getOpcode()) { default: @@ -309,28 +309,28 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) { // If Lo/Hi is null, the sub-method took care of registering results etc. if (Lo.Val) - SetSplitVector(SDOperand(N, ResNo), Lo, Hi); + SetSplitVector(SDValue(N, ResNo), Lo, Hi); } -void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand LHSLo, LHSHi; +void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue LHSLo, LHSHi; GetSplitVector(N->getOperand(0), LHSLo, LHSHi); - SDOperand RHSLo, RHSHi; + SDValue RHSLo, RHSHi; GetSplitVector(N->getOperand(1), RHSLo, RHSHi); Lo = DAG.getNode(N->getOpcode(), LHSLo.getValueType(), LHSLo, RHSLo); Hi = DAG.getNode(N->getOpcode(), LHSHi.getValueType(), LHSHi, RHSHi); } -void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo, + SDValue &Hi) { // We know the result is a vector. The input may be either a vector or a // scalar value. MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); - SDOperand InOp = N->getOperand(0); + SDValue InOp = N->getOperand(0); MVT InVT = InOp.getValueType(); // Handle some special cases efficiently. 
@@ -379,20 +379,20 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDOperand &Lo, Hi = DAG.getNode(ISD::BIT_CONVERT, HiVT, Hi); } -void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, + SDValue &Hi) { MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); unsigned LoNumElts = LoVT.getVectorNumElements(); - SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts); + SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+LoNumElts); Lo = DAG.getNode(ISD::BUILD_VECTOR, LoVT, &LoOps[0], LoOps.size()); - SmallVector<SDOperand, 8> HiOps(N->op_begin()+LoNumElts, N->op_end()); + SmallVector<SDValue, 8> HiOps(N->op_begin()+LoNumElts, N->op_end()); Hi = DAG.getNode(ISD::BUILD_VECTOR, HiVT, &HiOps[0], HiOps.size()); } -void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, + SDValue &Hi) { assert(!(N->getNumOperands() & 1) && "Unsupported CONCAT_VECTORS"); unsigned NumSubvectors = N->getNumOperands() / 2; if (NumSubvectors == 1) { @@ -404,25 +404,25 @@ void DAGTypeLegalizer::SplitVecRes_CONCAT_VECTORS(SDNode *N, SDOperand &Lo, MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); - SmallVector<SDOperand, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors); + SmallVector<SDValue, 8> LoOps(N->op_begin(), N->op_begin()+NumSubvectors); Lo = DAG.getNode(ISD::CONCAT_VECTORS, LoVT, &LoOps[0], LoOps.size()); - SmallVector<SDOperand, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end()); + SmallVector<SDValue, 8> HiOps(N->op_begin()+NumSubvectors, N->op_end()); Hi = DAG.getNode(ISD::CONCAT_VECTORS, HiVT, &HiOps[0], HiOps.size()); } -void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_FPOWI(SDNode *N, SDValue &Lo, + SDValue &Hi) { GetSplitVector(N->getOperand(0), Lo, Hi); Lo = DAG.getNode(ISD::FPOWI, Lo.getValueType(), Lo, N->getOperand(1)); Hi = DAG.getNode(ISD::FPOWI, Hi.getValueType(), Hi, N->getOperand(1)); } -void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { - SDOperand Vec = N->getOperand(0); - SDOperand Elt = N->getOperand(1); - SDOperand Idx = N->getOperand(2); +void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, + SDValue &Hi) { + SDValue Vec = N->getOperand(0); + SDValue Elt = N->getOperand(1); + SDValue Idx = N->getOperand(2); GetSplitVector(Vec, Lo, Hi); if (ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) { @@ -439,29 +439,29 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDOperand &Lo, // Spill the vector to the stack. MVT VecVT = Vec.getValueType(); MVT EltVT = VecVT.getVectorElementType(); - SDOperand StackPtr = DAG.CreateStackTemporary(VecVT); - SDOperand Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); + SDValue StackPtr = DAG.CreateStackTemporary(VecVT); + SDValue Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); // Store the new element. This may be larger than the vector element type, // so use a truncating store. - SDOperand EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx); + SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx); Store = DAG.getTruncStore(Store, Elt, EltPtr, NULL, 0, EltVT); // Reload the vector from the stack. 
- SDOperand Load = DAG.getLoad(VecVT, Store, StackPtr, NULL, 0); + SDValue Load = DAG.getLoad(VecVT, Store, StackPtr, NULL, 0); // Split it. SplitVecRes_LOAD(cast<LoadSDNode>(Load.Val), Lo, Hi); } -void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo, + SDValue &Hi) { assert(ISD::isUNINDEXEDLoad(LD) && "Indexed load during type legalization!"); MVT LoVT, HiVT; GetSplitDestVTs(LD->getValueType(0), LoVT, HiVT); - SDOperand Ch = LD->getChain(); - SDOperand Ptr = LD->getBasePtr(); + SDValue Ch = LD->getChain(); + SDValue Ptr = LD->getBasePtr(); const Value *SV = LD->getSrcValue(); int SVOffset = LD->getSrcValueOffset(); unsigned Alignment = LD->getAlignment(); @@ -490,11 +490,11 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDOperand &Lo, // Legalized the chain result - switch anything that used the old chain to // use the new one. - ReplaceValueWith(SDOperand(LD, 1), Ch); + ReplaceValueWith(SDValue(LD, 1), Ch); } -void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, + SDValue &Hi) { // Get the dest types - they may not match the input types, e.g. int_to_fp. MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); @@ -504,11 +504,11 @@ void DAGTypeLegalizer::SplitVecRes_UnaryOp(SDNode *N, SDOperand &Lo, Hi = DAG.getNode(N->getOpcode(), HiVT, Hi); } -void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDValue &Lo, + SDValue &Hi) { // Build the low part. - SDOperand Mask = N->getOperand(2); - SmallVector<SDOperand, 16> Ops; + SDValue Mask = N->getOperand(2); + SmallVector<SDValue, 16> Ops; MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); MVT EltVT = LoVT.getVectorElementType(); @@ -520,7 +520,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo, // to be legalized, so this makes the code simpler. for (unsigned i = 0; i != LoNumElts; ++i) { unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getValue(); - SDOperand InVec = N->getOperand(0); + SDValue InVec = N->getOperand(0); if (Idx >= NumElements) { InVec = N->getOperand(1); Idx -= NumElements; @@ -533,7 +533,7 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo, for (unsigned i = LoNumElts; i != NumElements; ++i) { unsigned Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getValue(); - SDOperand InVec = N->getOperand(0); + SDValue InVec = N->getOperand(0); if (Idx >= NumElements) { InVec = N->getOperand(1); Idx -= NumElements; @@ -544,12 +544,12 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(SDNode *N, SDOperand &Lo, Hi = DAG.getNode(ISD::BUILD_VECTOR, HiVT, &Ops[0], Ops.size()); } -void DAGTypeLegalizer::SplitVecRes_VSETCC(SDNode *N, SDOperand &Lo, - SDOperand &Hi) { +void DAGTypeLegalizer::SplitVecRes_VSETCC(SDNode *N, SDValue &Lo, + SDValue &Hi) { MVT LoVT, HiVT; GetSplitDestVTs(N->getValueType(0), LoVT, HiVT); - SDOperand LL, LH, RL, RH; + SDValue LL, LH, RL, RH; GetSplitVector(N->getOperand(0), LL, LH); GetSplitVector(N->getOperand(1), RL, RH); @@ -568,7 +568,7 @@ void DAGTypeLegalizer::SplitVecRes_VSETCC(SDNode *N, SDOperand &Lo, /// node may need legalization as well as the specified one. 
bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) { DEBUG(cerr << "Split node operand: "; N->dump(&DAG); cerr << "\n"); - SDOperand Res = SDOperand(); + SDValue Res = SDValue(); if (Res.Val == 0) { switch (N->getOpcode()) { @@ -605,15 +605,15 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) { assert(Res.getValueType() == N->getValueType(0) && N->getNumValues() == 1 && "Invalid operand expansion"); - ReplaceValueWith(SDOperand(N, 0), Res); + ReplaceValueWith(SDValue(N, 0), Res); return false; } -SDOperand DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) { +SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) { // For example, i64 = BIT_CONVERT v4i16 on alpha. Typically the vector will // end up being split all the way down to individual components. Convert the // split pieces into integers and reassemble. - SDOperand Lo, Hi; + SDValue Lo, Hi; GetSplitVector(N->getOperand(0), Lo, Hi); Lo = BitConvertToInteger(Lo); Hi = BitConvertToInteger(Hi); @@ -625,12 +625,12 @@ SDOperand DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) { JoinIntegers(Lo, Hi)); } -SDOperand DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) { +SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) { // We know that the extracted result type is legal. For now, assume the index // is a constant. MVT SubVT = N->getValueType(0); - SDOperand Idx = N->getOperand(1); - SDOperand Lo, Hi; + SDValue Idx = N->getOperand(1); + SDValue Lo, Hi; GetSplitVector(N->getOperand(0), Lo, Hi); uint64_t LoElts = Lo.getValueType().getVectorNumElements(); @@ -646,48 +646,48 @@ SDOperand DAGTypeLegalizer::SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N) { } } -SDOperand DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) { - SDOperand Vec = N->getOperand(0); - SDOperand Idx = N->getOperand(1); +SDValue DAGTypeLegalizer::SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N) { + SDValue Vec = N->getOperand(0); + SDValue Idx = N->getOperand(1); MVT VecVT = Vec.getValueType(); if (isa<ConstantSDNode>(Idx)) { uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getValue(); assert(IdxVal < VecVT.getVectorNumElements() && "Invalid vector index!"); - SDOperand Lo, Hi; + SDValue Lo, Hi; GetSplitVector(Vec, Lo, Hi); uint64_t LoElts = Lo.getValueType().getVectorNumElements(); if (IdxVal < LoElts) - return DAG.UpdateNodeOperands(SDOperand(N, 0), Lo, Idx); + return DAG.UpdateNodeOperands(SDValue(N, 0), Lo, Idx); else - return DAG.UpdateNodeOperands(SDOperand(N, 0), Hi, + return DAG.UpdateNodeOperands(SDValue(N, 0), Hi, DAG.getConstant(IdxVal - LoElts, Idx.getValueType())); } // Store the vector to the stack. MVT EltVT = VecVT.getVectorElementType(); - SDOperand StackPtr = DAG.CreateStackTemporary(VecVT); - SDOperand Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); + SDValue StackPtr = DAG.CreateStackTemporary(VecVT); + SDValue Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0); // Load back the required element. 
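Both the INSERT_VECTOR_ELT hunk earlier and the EXTRACT_VECTOR_ELT hunk above handle a non-constant element index the same way: they go through memory. A minimal sketch of that fallback, using only calls that appear in these hunks (GetVectorElementPointer is the legalizer's own helper; Vec, Idx, VecVT and EltVT are assumed to be in scope):

    // Spill the whole vector to a fresh stack slot.
    SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
    SDValue Store = DAG.getStore(DAG.getEntryNode(), Vec, StackPtr, NULL, 0);
    // Compute the address of element Idx within the slot...
    SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
    // ...then either store the new element there (insert) and reload the vector,
    // or, as in the extract case shown here, load just the requested element.
    return DAG.getLoad(EltVT, Store, EltPtr, NULL, 0);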
StackPtr = GetVectorElementPointer(StackPtr, EltVT, Idx); return DAG.getLoad(EltVT, Store, StackPtr, NULL, 0); } -SDOperand DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { +SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { assert(ISD::isNormalStore(N) && "Truncating store of vector?"); assert(OpNo == 1 && "Can only split the stored value"); - SDOperand Ch = N->getChain(); - SDOperand Ptr = N->getBasePtr(); + SDValue Ch = N->getChain(); + SDValue Ptr = N->getBasePtr(); int SVOffset = N->getSrcValueOffset(); unsigned Alignment = N->getAlignment(); bool isVol = N->isVolatile(); - SDOperand Lo, Hi; + SDValue Lo, Hi; GetSplitVector(N->getOperand(1), Lo, Hi); unsigned IncrementSize = Lo.getValueType().getSizeInBits()/8; @@ -703,9 +703,9 @@ SDOperand DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) { return DAG.getNode(ISD::TokenFactor, MVT::Other, Lo, Hi); } -SDOperand DAGTypeLegalizer::SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo){ +SDValue DAGTypeLegalizer::SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo){ assert(OpNo == 2 && "Shuffle source type differs from result type?"); - SDOperand Mask = N->getOperand(2); + SDValue Mask = N->getOperand(2); unsigned MaskLength = Mask.getValueType().getVectorNumElements(); unsigned LargestMaskEntryPlusOne = 2 * MaskLength; unsigned MinimumBitWidth = Log2_32_Ceil(LargestMaskEntryPlusOne); @@ -740,13 +740,13 @@ SDOperand DAGTypeLegalizer::SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo){ continue; // Success! Rebuild the vector using the legal types. - SmallVector<SDOperand, 16> Ops(MaskLength); + SmallVector<SDValue, 16> Ops(MaskLength); for (unsigned i = 0; i < MaskLength; ++i) { uint64_t Idx = cast<ConstantSDNode>(Mask.getOperand(i))->getValue(); Ops[i] = DAG.getConstant(Idx, OpVT); } - return DAG.UpdateNodeOperands(SDOperand(N,0), + return DAG.UpdateNodeOperands(SDValue(N,0), N->getOperand(0), N->getOperand(1), DAG.getNode(ISD::BUILD_VECTOR, VecVT, &Ops[0], Ops.size())); @@ -756,5 +756,5 @@ SDOperand DAGTypeLegalizer::SplitVecOp_VECTOR_SHUFFLE(SDNode *N, unsigned OpNo){ break; } assert(false && "Failed to find an appropriate mask type!"); - return SDOperand(N, 0); + return SDValue(N, 0); } diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp index ed9bca1f1e..3b2265ebfe 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAG.cpp @@ -136,7 +136,7 @@ void ScheduleDAG::BuildSchedUnits() { // have a user of the flag operand. N = NI; while (N->getValueType(N->getNumValues()-1) == MVT::Flag) { - SDOperand FlagVal(N, N->getNumValues()-1); + SDValue FlagVal(N, N->getNumValues()-1); // There are either zero or one users of the Flag result. bool HasFlagUse = false; @@ -408,11 +408,11 @@ getInstrOperandRegClass(const TargetRegisterInfo *TRI, /// implicit physical register output. void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, unsigned SrcReg, - DenseMap<SDOperand, unsigned> &VRBaseMap) { + DenseMap<SDValue, unsigned> &VRBaseMap) { unsigned VRBase = 0; if (TargetRegisterInfo::isVirtualRegister(SrcReg)) { // Just use the input register directly! 
- SDOperand Op(Node, ResNo); + SDValue Op(Node, ResNo); if (IsClone) VRBaseMap.erase(Op); bool isNew = VRBaseMap.insert(std::make_pair(Op, SrcReg)).second; @@ -439,7 +439,7 @@ void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo, Match = false; } else { for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { - SDOperand Op = User->getOperand(i); + SDValue Op = User->getOperand(i); if (Op.Val != Node || Op.ResNo != ResNo) continue; MVT VT = Node->getValueType(Op.ResNo); @@ -472,7 +472,7 @@ void ScheduleDAG::EmitCopyFromReg(SDNode *Node, unsigned ResNo, TII->copyRegToReg(*BB, BB->end(), VRBase, SrcReg, DstRC, SrcRC); } - SDOperand Op(Node, ResNo); + SDValue Op(Node, ResNo); if (IsClone) VRBaseMap.erase(Op); bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second; @@ -500,7 +500,7 @@ unsigned ScheduleDAG::getDstOfOnlyCopyToRegUse(SDNode *Node, void ScheduleDAG::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI, const TargetInstrDesc &II, - DenseMap<SDOperand, unsigned> &VRBaseMap) { + DenseMap<SDValue, unsigned> &VRBaseMap) { assert(Node->getMachineOpcode() != TargetInstrInfo::IMPLICIT_DEF && "IMPLICIT_DEF should have been handled as a special case elsewhere!"); @@ -533,7 +533,7 @@ void ScheduleDAG::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI, MI->addOperand(MachineOperand::CreateReg(VRBase, true)); } - SDOperand Op(Node, i); + SDValue Op(Node, i); bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second; isNew = isNew; // Silence compiler warning. assert(isNew && "Node emitted out of order - early"); @@ -542,8 +542,8 @@ void ScheduleDAG::CreateVirtualRegisters(SDNode *Node, MachineInstr *MI, /// getVR - Return the virtual register corresponding to the specified result /// of the specified node. -unsigned ScheduleDAG::getVR(SDOperand Op, - DenseMap<SDOperand, unsigned> &VRBaseMap) { +unsigned ScheduleDAG::getVR(SDValue Op, + DenseMap<SDValue, unsigned> &VRBaseMap) { if (Op.isMachineOpcode() && Op.getMachineOpcode() == TargetInstrInfo::IMPLICIT_DEF) { // Add an IMPLICIT_DEF instruction before every use. @@ -558,7 +558,7 @@ unsigned ScheduleDAG::getVR(SDOperand Op, return VReg; } - DenseMap<SDOperand, unsigned>::iterator I = VRBaseMap.find(Op); + DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op); assert(I != VRBaseMap.end() && "Node emitted out of order - late"); return I->second; } @@ -568,10 +568,10 @@ unsigned ScheduleDAG::getVR(SDOperand Op, /// specifies the instruction information for the node, and IIOpNum is the /// operand number (in the II) that we are adding. IIOpNum and II are used for /// assertions only. -void ScheduleDAG::AddOperand(MachineInstr *MI, SDOperand Op, +void ScheduleDAG::AddOperand(MachineInstr *MI, SDValue Op, unsigned IIOpNum, const TargetInstrDesc *II, - DenseMap<SDOperand, unsigned> &VRBaseMap) { + DenseMap<SDValue, unsigned> &VRBaseMap) { if (Op.isMachineOpcode()) { // Note that this case is redundant with the final else block, but we // include it because it is the most common and it makes the logic @@ -702,7 +702,7 @@ getSuperRegisterRegClass(const TargetRegisterClass *TRC, /// EmitSubregNode - Generate machine code for subreg nodes. 
/// void ScheduleDAG::EmitSubregNode(SDNode *Node, - DenseMap<SDOperand, unsigned> &VRBaseMap) { + DenseMap<SDValue, unsigned> &VRBaseMap) { unsigned VRBase = 0; unsigned Opc = Node->getMachineOpcode(); @@ -752,9 +752,9 @@ void ScheduleDAG::EmitSubregNode(SDNode *Node, BB->push_back(MI); } else if (Opc == TargetInstrInfo::INSERT_SUBREG || Opc == TargetInstrInfo::SUBREG_TO_REG) { - SDOperand N0 = Node->getOperand(0); - SDOperand N1 = Node->getOperand(1); - SDOperand N2 = Node->getOperand(2); + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + SDValue N2 = Node->getOperand(2); unsigned SubReg = getVR(N1, VRBaseMap); unsigned SubIdx = cast<ConstantSDNode>(N2)->getValue(); @@ -788,7 +788,7 @@ void ScheduleDAG::EmitSubregNode(SDNode *Node, } else assert(0 && "Node is not insert_subreg, extract_subreg, or subreg_to_reg"); - SDOperand Op(Node, 0); + SDValue Op(Node, 0); bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second; isNew = isNew; // Silence compiler warning. assert(isNew && "Node emitted out of order - early"); @@ -797,7 +797,7 @@ void ScheduleDAG::EmitSubregNode(SDNode *Node, /// EmitNode - Generate machine code for an node and needed dependencies. /// void ScheduleDAG::EmitNode(SDNode *Node, bool IsClone, - DenseMap<SDOperand, unsigned> &VRBaseMap) { + DenseMap<SDValue, unsigned> &VRBaseMap) { // If machine instruction if (Node->isMachineOpcode()) { unsigned Opc = Node->getMachineOpcode(); @@ -891,7 +891,7 @@ void ScheduleDAG::EmitNode(SDNode *Node, bool IsClone, break; case ISD::CopyToReg: { unsigned SrcReg; - SDOperand SrcVal = Node->getOperand(2); + SDValue SrcVal = Node->getOperand(2); if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal)) SrcReg = R->getReg(); else @@ -1099,7 +1099,7 @@ MachineBasicBlock *ScheduleDAG::EmitSchedule() { } // Finally, emit the code for all of the scheduled instructions. 
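The ScheduleDAG hunks above all revolve around VRBaseMap, a DenseMap<SDValue, unsigned> that records which virtual register holds each already-emitted DAG value. Keying the map on the value rather than the node lets different results of one node land in different vregs. The insert/lookup discipline, condensed from the CreateVirtualRegisters and getVR hunks:

    // When result i of Node is emitted into VRBase, remember the mapping.
    SDValue Op(Node, i);
    bool isNew = VRBaseMap.insert(std::make_pair(Op, VRBase)).second;
    assert(isNew && "Node emitted out of order - early");

    // A later user of the same value looks it up with the same (node, result) key.
    DenseMap<SDValue, unsigned>::iterator I = VRBaseMap.find(Op);
    assert(I != VRBaseMap.end() && "Node emitted out of order - late");
    unsigned VReg = I->second;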
- DenseMap<SDOperand, unsigned> VRBaseMap; + DenseMap<SDValue, unsigned> VRBaseMap; DenseMap<SUnit*, unsigned> CopyVRBaseMap; for (unsigned i = 0, e = Sequence.size(); i != e; i++) { SUnit *SU = Sequence[i]; diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp index 43fbd4b050..bd46f8e550 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -651,7 +651,7 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { TryUnfold = true; } for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { - const SDOperand &Op = N->getOperand(i); + const SDValue &Op = N->getOperand(i); MVT VT = Op.Val->getValueType(Op.ResNo); if (VT == MVT::Flag) return NULL; @@ -670,9 +670,9 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { unsigned NumVals = N->getNumValues(); unsigned OldNumVals = SU->Node->getNumValues(); for (unsigned i = 0; i != NumVals; ++i) - DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i)); - DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1), - SDOperand(LoadNode, 1)); + DAG.ReplaceAllUsesOfValueWith(SDValue(SU->Node, i), SDValue(N, i)); + DAG.ReplaceAllUsesOfValueWith(SDValue(SU->Node, OldNumVals-1), + SDValue(LoadNode, 1)); SUnit *NewSU = CreateNewSUnit(N); assert(N->getNodeId() == -1 && "Node already inserted!"); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index c37fa38a05..c99d504f27 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -110,7 +110,7 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) { // Do not accept build_vectors that aren't all constants or which have non-~0 // elements. - SDOperand NotZero = N->getOperand(i); + SDValue NotZero = N->getOperand(i); if (isa<ConstantSDNode>(NotZero)) { if (!cast<ConstantSDNode>(NotZero)->isAllOnesValue()) return false; @@ -151,7 +151,7 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) { // Do not accept build_vectors that aren't all constants or which have non-~0 // elements. - SDOperand Zero = N->getOperand(i); + SDValue Zero = N->getOperand(i); if (isa<ConstantSDNode>(Zero)) { if (!cast<ConstantSDNode>(Zero)->isNullValue()) return false; @@ -183,7 +183,7 @@ bool ISD::isScalarToVector(const SDNode *N) { return false; unsigned NumElems = N->getNumOperands(); for (unsigned i = 1; i < NumElems; ++i) { - SDOperand V = N->getOperand(i); + SDValue V = N->getOperand(i); if (V.getOpcode() != ISD::UNDEF) return false; } @@ -194,7 +194,7 @@ bool ISD::isScalarToVector(const SDNode *N) { /// isDebugLabel - Return true if the specified node represents a debug /// label (i.e. ISD::DBG_LABEL or TargetInstrInfo::DBG_LABEL node). bool ISD::isDebugLabel(const SDNode *N) { - SDOperand Zero; + SDValue Zero; if (N->getOpcode() == ISD::DBG_LABEL) return true; if (N->isMachineOpcode() && @@ -323,7 +323,7 @@ static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) { /// AddNodeIDOperands - Various routines for adding operands to the NodeID data. 
/// static void AddNodeIDOperands(FoldingSetNodeID &ID, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { for (; NumOps; --NumOps, ++Ops) { ID.AddPointer(Ops->Val); ID.AddInteger(Ops->ResNo); @@ -335,14 +335,14 @@ static void AddNodeIDOperands(FoldingSetNodeID &ID, static void AddNodeIDOperands(FoldingSetNodeID &ID, const SDUse *Ops, unsigned NumOps) { for (; NumOps; --NumOps, ++Ops) { - ID.AddPointer(Ops->getSDOperand().Val); - ID.AddInteger(Ops->getSDOperand().ResNo); + ID.AddPointer(Ops->getVal()); + ID.AddInteger(Ops->getSDValue().ResNo); } } static void AddNodeIDNode(FoldingSetNodeID &ID, unsigned short OpC, SDVTList VTList, - const SDOperand *OpList, unsigned N) { + const SDValue *OpList, unsigned N) { AddNodeIDOpcode(ID, OpC); AddNodeIDValueTypes(ID, VTList); AddNodeIDOperands(ID, OpList, N); @@ -649,7 +649,7 @@ SDNode *SelectionDAG::AddNonLeafNodeToCSEMaps(SDNode *N) { /// were replaced with those specified. If this node is never memoized, /// return null, otherwise return a pointer to the slot it would take. If a /// node already exists with these operands, the slot will be non-null. -SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDOperand Op, +SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos) { if (N->getValueType(0) == MVT::Flag) return 0; // Never CSE anything that produces a flag. @@ -668,7 +668,7 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDOperand Op, if (N->getValueType(i) == MVT::Flag) return 0; // Never CSE anything that produces a flag. - SDOperand Ops[] = { Op }; + SDValue Ops[] = { Op }; FoldingSetNodeID ID; AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1); return CSEMap.FindNodeOrInsertPos(ID, InsertPos); @@ -679,7 +679,7 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDOperand Op, /// return null, otherwise return a pointer to the slot it would take. If a /// node already exists with these operands, the slot will be non-null. SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, - SDOperand Op1, SDOperand Op2, + SDValue Op1, SDValue Op2, void *&InsertPos) { if (N->getOpcode() == ISD::HANDLENODE || N->getValueType(0) == MVT::Flag) @@ -688,7 +688,7 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, if (N->getValueType(i) == MVT::Flag) return 0; // Never CSE anything that produces a flag. - SDOperand Ops[] = { Op1, Op2 }; + SDValue Ops[] = { Op1, Op2 }; FoldingSetNodeID ID; AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2); return CSEMap.FindNodeOrInsertPos(ID, InsertPos); @@ -700,7 +700,7 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, /// return null, otherwise return a pointer to the slot it would take. If a /// node already exists with these operands, the slot will be non-null. SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, - const SDOperand *Ops,unsigned NumOps, + const SDValue *Ops,unsigned NumOps, void *&InsertPos) { if (N->getValueType(0) == MVT::Flag) return 0; // Never CSE anything that produces a flag. 
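The SelectionDAG::get* factories that follow nearly all share one uniquing idiom: hash the opcode, the result-type list, the operands (each operand contributes its node pointer Val and result number ResNo, as the AddNodeIDOperands hunk above shows), plus any node-specific payload, then probe CSEMap and allocate a new node only on a miss. Condensed here from the getFrameIndex hunk below; the other factories differ only in the payload and the node class:

    FoldingSetNodeID ID;
    AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);  // opcode + result types, no operands
    ID.AddInteger(FI);                            // node-specific payload (a frame index here)
    void *IP = 0;
    if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
      return SDValue(E, 0);                       // reuse an existing identical node
    SDNode *N = getAllocator().Allocate<FrameIndexSDNode>();
    new (N) FrameIndexSDNode(FI, VT, isTarget);   // placement-new into pool storage
    CSEMap.InsertNode(N, IP);                     // remember it for future lookups
    AllNodes.push_back(N);
    return SDValue(N, 0);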
@@ -752,7 +752,7 @@ void SelectionDAG::VerifyNode(SDNode *N) { "Wrong number of BUILD_VECTOR operands!"); MVT EltVT = N->getValueType(0).getVectorElementType(); for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) - assert(I->getSDOperand().getValueType() == EltVT && + assert(I->getSDValue().getValueType() == EltVT && "Wrong BUILD_VECTOR operand type!"); break; } @@ -783,7 +783,7 @@ SelectionDAG::~SelectionDAG() { } } -SDOperand SelectionDAG::getZeroExtendInReg(SDOperand Op, MVT VT) { +SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, MVT VT) { if (Op.getValueType() == VT) return Op; APInt Imm = APInt::getLowBitsSet(Op.getValueSizeInBits(), VT.getSizeInBits()); @@ -791,12 +791,12 @@ SDOperand SelectionDAG::getZeroExtendInReg(SDOperand Op, MVT VT) { getConstant(Imm, Op.getValueType())); } -SDOperand SelectionDAG::getConstant(uint64_t Val, MVT VT, bool isT) { +SDValue SelectionDAG::getConstant(uint64_t Val, MVT VT, bool isT) { MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT; return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT); } -SDOperand SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) { +SDValue SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) { assert(VT.isInteger() && "Cannot create FP integer constant!"); MVT EltVT = VT.isVector() ? VT.getVectorElementType() : VT; @@ -811,7 +811,7 @@ SDOperand SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) { SDNode *N = NULL; if ((N = CSEMap.FindNodeOrInsertPos(ID, IP))) if (!VT.isVector()) - return SDOperand(N, 0); + return SDValue(N, 0); if (!N) { N = getAllocator().Allocate<ConstantSDNode>(); new (N) ConstantSDNode(isT, Val, EltVT); @@ -819,21 +819,21 @@ SDOperand SelectionDAG::getConstant(const APInt &Val, MVT VT, bool isT) { AllNodes.push_back(N); } - SDOperand Result(N, 0); + SDValue Result(N, 0); if (VT.isVector()) { - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.assign(VT.getVectorNumElements(), Result); Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()); } return Result; } -SDOperand SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) { +SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) { return getConstant(Val, TLI.getPointerTy(), isTarget); } -SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) { +SDValue SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) { assert(VT.isFloatingPoint() && "Cannot create integer FP constant!"); MVT EltVT = @@ -850,7 +850,7 @@ SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) { SDNode *N = NULL; if ((N = CSEMap.FindNodeOrInsertPos(ID, IP))) if (!VT.isVector()) - return SDOperand(N, 0); + return SDValue(N, 0); if (!N) { N = getAllocator().Allocate<ConstantFPSDNode>(); new (N) ConstantFPSDNode(isTarget, V, EltVT); @@ -858,16 +858,16 @@ SDOperand SelectionDAG::getConstantFP(const APFloat& V, MVT VT, bool isTarget) { AllNodes.push_back(N); } - SDOperand Result(N, 0); + SDValue Result(N, 0); if (VT.isVector()) { - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.assign(VT.getVectorNumElements(), Result); Result = getNode(ISD::BUILD_VECTOR, VT, &Ops[0], Ops.size()); } return Result; } -SDOperand SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) { +SDValue SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) { MVT EltVT = VT.isVector() ? 
VT.getVectorElementType() : VT; if (EltVT==MVT::f32) @@ -876,9 +876,9 @@ SDOperand SelectionDAG::getConstantFP(double Val, MVT VT, bool isTarget) { return getConstantFP(APFloat(Val), VT, isTarget); } -SDOperand SelectionDAG::getGlobalAddress(const GlobalValue *GV, - MVT VT, int Offset, - bool isTargetGA) { +SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, + MVT VT, int Offset, + bool isTargetGA) { unsigned Opc; const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); @@ -899,47 +899,47 @@ SDOperand SelectionDAG::getGlobalAddress(const GlobalValue *GV, ID.AddInteger(Offset); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<GlobalAddressSDNode>(); new (N) GlobalAddressSDNode(isTargetGA, GV, VT, Offset); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getFrameIndex(int FI, MVT VT, bool isTarget) { +SDValue SelectionDAG::getFrameIndex(int FI, MVT VT, bool isTarget) { unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); ID.AddInteger(FI); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<FrameIndexSDNode>(); new (N) FrameIndexSDNode(FI, VT, isTarget); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getJumpTable(int JTI, MVT VT, bool isTarget){ +SDValue SelectionDAG::getJumpTable(int JTI, MVT VT, bool isTarget){ unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); ID.AddInteger(JTI); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<JumpTableSDNode>(); new (N) JumpTableSDNode(JTI, VT, isTarget); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getConstantPool(Constant *C, MVT VT, - unsigned Alignment, int Offset, - bool isTarget) { +SDValue SelectionDAG::getConstantPool(Constant *C, MVT VT, + unsigned Alignment, int Offset, + bool isTarget) { unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); @@ -948,18 +948,18 @@ SDOperand SelectionDAG::getConstantPool(Constant *C, MVT VT, ID.AddPointer(C); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<ConstantPoolSDNode>(); new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getConstantPool(MachineConstantPoolValue *C, MVT VT, - unsigned Alignment, int Offset, - bool isTarget) { +SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, MVT VT, + unsigned Alignment, int Offset, + bool isTarget) { unsigned Opc = isTarget ? 
ISD::TargetConstantPool : ISD::ConstantPool; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); @@ -968,76 +968,76 @@ SDOperand SelectionDAG::getConstantPool(MachineConstantPoolValue *C, MVT VT, C->AddSelectionDAGCSEId(ID); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<ConstantPoolSDNode>(); new (N) ConstantPoolSDNode(isTarget, C, VT, Offset, Alignment); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { +SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) { FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0); ID.AddPointer(MBB); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<BasicBlockSDNode>(); new (N) BasicBlockSDNode(MBB); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) { +SDValue SelectionDAG::getArgFlags(ISD::ArgFlagsTy Flags) { FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::ARG_FLAGS, getVTList(MVT::Other), 0, 0); ID.AddInteger(Flags.getRawBits()); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<ARG_FLAGSSDNode>(); new (N) ARG_FLAGSSDNode(Flags); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getValueType(MVT VT) { +SDValue SelectionDAG::getValueType(MVT VT) { if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size()) ValueTypeNodes.resize(VT.getSimpleVT()+1); SDNode *&N = VT.isExtended() ? 
ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT()]; - if (N) return SDOperand(N, 0); + if (N) return SDValue(N, 0); N = getAllocator().Allocate<VTSDNode>(); new (N) VTSDNode(VT); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getExternalSymbol(const char *Sym, MVT VT) { +SDValue SelectionDAG::getExternalSymbol(const char *Sym, MVT VT) { SDNode *&N = ExternalSymbols[Sym]; - if (N) return SDOperand(N, 0); + if (N) return SDValue(N, 0); N = getAllocator().Allocate<ExternalSymbolSDNode>(); new (N) ExternalSymbolSDNode(false, Sym, VT); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getTargetExternalSymbol(const char *Sym, MVT VT) { +SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, MVT VT) { SDNode *&N = TargetExternalSymbols[Sym]; - if (N) return SDOperand(N, 0); + if (N) return SDValue(N, 0); N = getAllocator().Allocate<ExternalSymbolSDNode>(); new (N) ExternalSymbolSDNode(true, Sym, VT); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getCondCode(ISD::CondCode Cond) { +SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) { if ((unsigned)Cond >= CondCodeNodes.size()) CondCodeNodes.resize(Cond+1); @@ -1047,50 +1047,50 @@ SDOperand SelectionDAG::getCondCode(ISD::CondCode Cond) { CondCodeNodes[Cond] = N; AllNodes.push_back(N); } - return SDOperand(CondCodeNodes[Cond], 0); + return SDValue(CondCodeNodes[Cond], 0); } -SDOperand SelectionDAG::getRegister(unsigned RegNo, MVT VT) { +SDValue SelectionDAG::getRegister(unsigned RegNo, MVT VT) { FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0); ID.AddInteger(RegNo); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<RegisterSDNode>(); new (N) RegisterSDNode(RegNo, VT); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getDbgStopPoint(SDOperand Root, - unsigned Line, unsigned Col, - const CompileUnitDesc *CU) { +SDValue SelectionDAG::getDbgStopPoint(SDValue Root, + unsigned Line, unsigned Col, + const CompileUnitDesc *CU) { SDNode *N = getAllocator().Allocate<DbgStopPointSDNode>(); new (N) DbgStopPointSDNode(Root, Line, Col, CU); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getLabel(unsigned Opcode, - SDOperand Root, - unsigned LabelID) { +SDValue SelectionDAG::getLabel(unsigned Opcode, + SDValue Root, + unsigned LabelID) { FoldingSetNodeID ID; - SDOperand Ops[] = { Root }; + SDValue Ops[] = { Root }; AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), &Ops[0], 1); ID.AddInteger(LabelID); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<LabelSDNode>(); new (N) LabelSDNode(Opcode, Root, LabelID); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getSrcValue(const Value *V) { +SDValue SelectionDAG::getSrcValue(const Value *V) { assert((!V || isa<PointerType>(V->getType())) && "SrcValue is not a pointer?"); @@ -1100,16 +1100,16 @@ SDOperand SelectionDAG::getSrcValue(const Value *V) { void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<SrcValueSDNode>(); 
new (N) SrcValueSDNode(V); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getMemOperand(const MachineMemOperand &MO) { +SDValue SelectionDAG::getMemOperand(const MachineMemOperand &MO) { const Value *v = MO.getValue(); assert((!v || isa<PointerType>(v->getType())) && "SrcValue is not a pointer?"); @@ -1124,18 +1124,18 @@ SDOperand SelectionDAG::getMemOperand(const MachineMemOperand &MO) { void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<MemOperandSDNode>(); new (N) MemOperandSDNode(MO); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } /// CreateStackTemporary - Create a stack temporary, suitable for holding the /// specified value type. -SDOperand SelectionDAG::CreateStackTemporary(MVT VT, unsigned minAlign) { +SDValue SelectionDAG::CreateStackTemporary(MVT VT, unsigned minAlign) { MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo(); unsigned ByteSize = VT.getSizeInBits()/8; const Type *Ty = VT.getTypeForMVT(); @@ -1146,8 +1146,8 @@ SDOperand SelectionDAG::CreateStackTemporary(MVT VT, unsigned minAlign) { return getFrameIndex(FrameIdx, TLI.getPointerTy()); } -SDOperand SelectionDAG::FoldSetCC(MVT VT, SDOperand N1, - SDOperand N2, ISD::CondCode Cond) { +SDValue SelectionDAG::FoldSetCC(MVT VT, SDValue N1, + SDValue N2, ISD::CondCode Cond) { // These setcc operations always fold. switch (Cond) { default: break; @@ -1194,7 +1194,7 @@ SDOperand SelectionDAG::FoldSetCC(MVT VT, SDOperand N1, if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.Val)) { // No compile time operations on this type yet. if (N1C->getValueType(0) == MVT::ppcf128) - return SDOperand(); + return SDValue(); APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); switch (Cond) { @@ -1245,12 +1245,12 @@ SDOperand SelectionDAG::FoldSetCC(MVT VT, SDOperand N1, } // Could not fold it. - return SDOperand(); + return SDValue(); } /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We /// use this predicate to simplify operations downstream. -bool SelectionDAG::SignBitIsZero(SDOperand Op, unsigned Depth) const { +bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const { unsigned BitWidth = Op.getValueSizeInBits(); return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth); } @@ -1258,7 +1258,7 @@ bool SelectionDAG::SignBitIsZero(SDOperand Op, unsigned Depth) const { /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use /// this predicate to simplify operations downstream. Mask is known to be zero /// for bits that V cannot have. -bool SelectionDAG::MaskedValueIsZero(SDOperand Op, const APInt &Mask, +bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth) const { APInt KnownZero, KnownOne; ComputeMaskedBits(Op, Mask, KnownZero, KnownOne, Depth); @@ -1270,7 +1270,7 @@ bool SelectionDAG::MaskedValueIsZero(SDOperand Op, const APInt &Mask, /// known to be either zero or one and return them in the KnownZero/KnownOne /// bitsets. This code only analyzes bits in Mask, in order to short-circuit /// processing. 
-void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask, +void SelectionDAG::ComputeMaskedBits(SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, unsigned Depth) const { unsigned BitWidth = Mask.getBitWidth(); @@ -1720,7 +1720,7 @@ void SelectionDAG::ComputeMaskedBits(SDOperand Op, const APInt &Mask, /// is always equal to the sign bit (itself), but other cases can give us /// information. For example, immediately after an "SRA X, 2", we know that /// the top 3 bits are all equal to each other, so we return 3. -unsigned SelectionDAG::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{ +unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{ MVT VT = Op.getValueType(); assert(VT.isInteger() && "Invalid VT!"); unsigned VTBits = VT.getSizeInBits(); @@ -1932,7 +1932,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDOperand Op, unsigned Depth) const{ } -bool SelectionDAG::isVerifiedDebugInfoDesc(SDOperand Op) const { +bool SelectionDAG::isVerifiedDebugInfoDesc(SDValue Op) const { GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op); if (!GA) return false; GlobalVariable *GV = dyn_cast<GlobalVariable>(GA->getGlobal()); @@ -1944,21 +1944,21 @@ bool SelectionDAG::isVerifiedDebugInfoDesc(SDOperand Op) const { /// getShuffleScalarElt - Returns the scalar element that will make up the ith /// element of the result of the vector shuffle. -SDOperand SelectionDAG::getShuffleScalarElt(const SDNode *N, unsigned i) { +SDValue SelectionDAG::getShuffleScalarElt(const SDNode *N, unsigned i) { MVT VT = N->getValueType(0); - SDOperand PermMask = N->getOperand(2); - SDOperand Idx = PermMask.getOperand(i); + SDValue PermMask = N->getOperand(2); + SDValue Idx = PermMask.getOperand(i); if (Idx.getOpcode() == ISD::UNDEF) return getNode(ISD::UNDEF, VT.getVectorElementType()); unsigned Index = cast<ConstantSDNode>(Idx)->getValue(); unsigned NumElems = PermMask.getNumOperands(); - SDOperand V = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1); + SDValue V = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1); Index %= NumElems; if (V.getOpcode() == ISD::BIT_CONVERT) { V = V.getOperand(0); if (V.getValueType().getVectorNumElements() != NumElems) - return SDOperand(); + return SDValue(); } if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) return (Index == 0) ? V.getOperand(0) @@ -1967,18 +1967,18 @@ SDOperand SelectionDAG::getShuffleScalarElt(const SDNode *N, unsigned i) { return V.getOperand(Index); if (V.getOpcode() == ISD::VECTOR_SHUFFLE) return getShuffleScalarElt(V.Val, Index); - return SDOperand(); + return SDValue(); } /// getNode - Gets or creates the specified node. /// -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT) { +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT) { FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<SDNode>(); new (N) SDNode(Opcode, SDNode::getSDVTList(VT)); CSEMap.InsertNode(N, IP); @@ -1987,10 +1987,10 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT) { #ifndef NDEBUG VerifyNode(N); #endif - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, SDOperand Operand) { +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT, SDValue Operand) { // Constant fold unary operations with an integer constant operand. 
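MaskedValueIsZero, SignBitIsZero and ComputeNumSignBits in the hunks above are the query side of ComputeMaskedBits: a caller supplies a mask of the bits it cares about and learns whether those bits are provably zero. A hypothetical caller follows; the mask and the 8-bit width are purely illustrative and not part of this commit:

    // Can Op be treated as an unsigned 8-bit quantity?  Ask whether every bit
    // above bit 7 is known to be zero.
    unsigned BitWidth = Op.getValueSizeInBits();
    APInt HighBits = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
    if (DAG.MaskedValueIsZero(Op, HighBits)) {
      // Only the low 8 bits of Op can be nonzero here.
    }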
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.Val)) { const APInt &Val = C->getAPIntValue(); @@ -2171,11 +2171,11 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, SDOperand Operand) { SDVTList VTs = getVTList(VT); if (VT != MVT::Flag) { // Don't CSE flag producing nodes FoldingSetNodeID ID; - SDOperand Ops[1] = { Operand }; + SDValue Ops[1] = { Operand }; AddNodeIDNode(ID, Opcode, VTs, Ops, 1); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); N = getAllocator().Allocate<UnarySDNode>(); new (N) UnarySDNode(Opcode, VTs, Operand); CSEMap.InsertNode(N, IP); @@ -2188,11 +2188,11 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, SDOperand Operand) { #ifndef NDEBUG VerifyNode(N); #endif - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, - SDOperand N1, SDOperand N2) { +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT, + SDValue N1, SDValue N2) { ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val); switch (Opcode) { @@ -2524,12 +2524,12 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, SDNode *N; SDVTList VTs = getVTList(VT); if (VT != MVT::Flag) { - SDOperand Ops[] = { N1, N2 }; + SDValue Ops[] = { N1, N2 }; FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, VTs, Ops, 2); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); N = getAllocator().Allocate<BinarySDNode>(); new (N) BinarySDNode(Opcode, VTs, N1, N2); CSEMap.InsertNode(N, IP); @@ -2542,18 +2542,18 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, #ifndef NDEBUG VerifyNode(N); #endif - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, - SDOperand N1, SDOperand N2, SDOperand N3) { +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT, + SDValue N1, SDValue N2, SDValue N3) { // Perform various simplifications. ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.Val); ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.Val); switch (Opcode) { case ISD::SETCC: { // Use FoldSetCC to simplify SETCC's. 
- SDOperand Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get()); + SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get()); if (Simp.Val) return Simp; break; } @@ -2593,12 +2593,12 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, SDNode *N; SDVTList VTs = getVTList(VT); if (VT != MVT::Flag) { - SDOperand Ops[] = { N1, N2, N3 }; + SDValue Ops[] = { N1, N2, N3 }; FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, VTs, Ops, 3); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); N = getAllocator().Allocate<TernarySDNode>(); new (N) TernarySDNode(Opcode, VTs, N1, N2, N3); CSEMap.InsertNode(N, IP); @@ -2610,26 +2610,26 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, #ifndef NDEBUG VerifyNode(N); #endif - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, - SDOperand N1, SDOperand N2, SDOperand N3, - SDOperand N4) { - SDOperand Ops[] = { N1, N2, N3, N4 }; +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT, + SDValue N1, SDValue N2, SDValue N3, + SDValue N4) { + SDValue Ops[] = { N1, N2, N3, N4 }; return getNode(Opcode, VT, Ops, 4); } -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, - SDOperand N1, SDOperand N2, SDOperand N3, - SDOperand N4, SDOperand N5) { - SDOperand Ops[] = { N1, N2, N3, N4, N5 }; +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT, + SDValue N1, SDValue N2, SDValue N3, + SDValue N4, SDValue N5) { + SDValue Ops[] = { N1, N2, N3, N4, N5 }; return getNode(Opcode, VT, Ops, 5); } /// getMemsetValue - Vectorized representation of the memset value /// operand. -static SDOperand getMemsetValue(SDOperand Value, MVT VT, SelectionDAG &DAG) { +static SDValue getMemsetValue(SDValue Value, MVT VT, SelectionDAG &DAG) { unsigned NumBits = VT.isVector() ? VT.getVectorElementType().getSizeInBits() : VT.getSizeInBits(); if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) { @@ -2659,7 +2659,7 @@ static SDOperand getMemsetValue(SDOperand Value, MVT VT, SelectionDAG &DAG) { /// getMemsetStringVal - Similar to getMemsetValue. Except this is only /// used when a memcpy is turned into a memset when the source is a constant /// string ptr. -static SDOperand getMemsetStringVal(MVT VT, SelectionDAG &DAG, +static SDValue getMemsetStringVal(MVT VT, SelectionDAG &DAG, const TargetLowering &TLI, std::string &Str, unsigned Offset) { // Handle vector with all elements zero. @@ -2687,7 +2687,7 @@ static SDOperand getMemsetStringVal(MVT VT, SelectionDAG &DAG, /// getMemBasePlusOffset - Returns base and offset node for the /// -static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset, +static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset, SelectionDAG &DAG) { MVT VT = Base.getValueType(); return DAG.getNode(ISD::ADD, VT, Base, DAG.getConstant(Offset, VT)); @@ -2695,7 +2695,7 @@ static SDOperand getMemBasePlusOffset(SDOperand Base, unsigned Offset, /// isMemSrcFromString - Returns true if memcpy source is a string constant. /// -static bool isMemSrcFromString(SDOperand Src, std::string &Str) { +static bool isMemSrcFromString(SDValue Src, std::string &Str) { unsigned SrcDelta = 0; GlobalAddressSDNode *G = NULL; if (Src.getOpcode() == ISD::GlobalAddress) @@ -2721,7 +2721,7 @@ static bool isMemSrcFromString(SDOperand Src, std::string &Str) { /// types of the sequence of memory ops to perform memset / memcpy. 
static bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps, - SDOperand Dst, SDOperand Src, + SDValue Dst, SDValue Src, unsigned Limit, uint64_t Size, unsigned &Align, std::string &Str, bool &isSrcStr, SelectionDAG &DAG, @@ -2802,9 +2802,9 @@ bool MeetsMaxMemopRequirement(std::vector<MVT> &MemOps, return true; } -static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG, - SDOperand Chain, SDOperand Dst, - SDOperand Src, uint64_t Size, +static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, + SDValue Chain, SDValue Dst, + SDValue Src, uint64_t Size, unsigned Align, bool AlwaysInline, const Value *DstSV, uint64_t DstSVOff, const Value *SrcSV, uint64_t SrcSVOff){ @@ -2821,17 +2821,17 @@ static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG, bool CopyFromStr; if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign, Str, CopyFromStr, DAG, TLI)) - return SDOperand(); + return SDValue(); bool isZeroStr = CopyFromStr && Str.empty(); - SmallVector<SDOperand, 8> OutChains; + SmallVector<SDValue, 8> OutChains; unsigned NumMemOps = MemOps.size(); uint64_t SrcOff = 0, DstOff = 0; for (unsigned i = 0; i < NumMemOps; i++) { MVT VT = MemOps[i]; unsigned VTSize = VT.getSizeInBits() / 8; - SDOperand Value, Store; + SDValue Value, Store; if (CopyFromStr && (isZeroStr || !VT.isVector())) { // It's unlikely a store of a vector immediate can be done in a single @@ -2860,9 +2860,9 @@ static SDOperand getMemcpyLoadsAndStores(SelectionDAG &DAG, &OutChains[0], OutChains.size()); } -static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG, - SDOperand Chain, SDOperand Dst, - SDOperand Src, uint64_t Size, +static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, + SDValue Chain, SDValue Dst, + SDValue Src, uint64_t Size, unsigned Align, bool AlwaysInline, const Value *DstSV, uint64_t DstSVOff, const Value *SrcSV, uint64_t SrcSVOff){ @@ -2879,18 +2879,18 @@ static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG, bool CopyFromStr; if (!MeetsMaxMemopRequirement(MemOps, Dst, Src, Limit, Size, DstAlign, Str, CopyFromStr, DAG, TLI)) - return SDOperand(); + return SDValue(); uint64_t SrcOff = 0, DstOff = 0; - SmallVector<SDOperand, 8> LoadValues; - SmallVector<SDOperand, 8> LoadChains; - SmallVector<SDOperand, 8> OutChains; + SmallVector<SDValue, 8> LoadValues; + SmallVector<SDValue, 8> LoadChains; + SmallVector<SDValue, 8> OutChains; unsigned NumMemOps = MemOps.size(); for (unsigned i = 0; i < NumMemOps; i++) { MVT VT = MemOps[i]; unsigned VTSize = VT.getSizeInBits() / 8; - SDOperand Value, Store; + SDValue Value, Store; Value = DAG.getLoad(VT, Chain, getMemBasePlusOffset(Src, SrcOff, DAG), @@ -2905,7 +2905,7 @@ static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG, for (unsigned i = 0; i < NumMemOps; i++) { MVT VT = MemOps[i]; unsigned VTSize = VT.getSizeInBits() / 8; - SDOperand Value, Store; + SDValue Value, Store; Store = DAG.getStore(Chain, LoadValues[i], getMemBasePlusOffset(Dst, DstOff, DAG), @@ -2918,9 +2918,9 @@ static SDOperand getMemmoveLoadsAndStores(SelectionDAG &DAG, &OutChains[0], OutChains.size()); } -static SDOperand getMemsetStores(SelectionDAG &DAG, - SDOperand Chain, SDOperand Dst, - SDOperand Src, uint64_t Size, +static SDValue getMemsetStores(SelectionDAG &DAG, + SDValue Chain, SDValue Dst, + SDValue Src, uint64_t Size, unsigned Align, const Value *DstSV, uint64_t DstSVOff) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -2932,17 +2932,17 @@ static SDOperand getMemsetStores(SelectionDAG &DAG, bool CopyFromStr; if 
(!MeetsMaxMemopRequirement(MemOps, Dst, Src, TLI.getMaxStoresPerMemset(), Size, Align, Str, CopyFromStr, DAG, TLI)) - return SDOperand(); + return SDValue(); - SmallVector<SDOperand, 8> OutChains; + SmallVector<SDValue, 8> OutChains; uint64_t DstOff = 0; unsigned NumMemOps = MemOps.size(); for (unsigned i = 0; i < NumMemOps; i++) { MVT VT = MemOps[i]; unsigned VTSize = VT.getSizeInBits() / 8; - SDOperand Value = getMemsetValue(Src, VT, DAG); - SDOperand Store = DAG.getStore(Chain, Value, + SDValue Value = getMemsetValue(Src, VT, DAG); + SDValue Store = DAG.getStore(Chain, Value, getMemBasePlusOffset(Dst, DstOff, DAG), DstSV, DstSVOff + DstOff); OutChains.push_back(Store); @@ -2953,11 +2953,11 @@ static SDOperand getMemsetStores(SelectionDAG &DAG, &OutChains[0], OutChains.size()); } -SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst, - SDOperand Src, SDOperand Size, - unsigned Align, bool AlwaysInline, - const Value *DstSV, uint64_t DstSVOff, - const Value *SrcSV, uint64_t SrcSVOff) { +SDValue SelectionDAG::getMemcpy(SDValue Chain, SDValue Dst, + SDValue Src, SDValue Size, + unsigned Align, bool AlwaysInline, + const Value *DstSV, uint64_t DstSVOff, + const Value *SrcSV, uint64_t SrcSVOff) { // Check to see if we should lower the memcpy to loads and stores first. // For cases within the target-specified limits, this is the best choice. @@ -2967,7 +2967,7 @@ SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst, if (ConstantSize->isNullValue()) return Chain; - SDOperand Result = + SDValue Result = getMemcpyLoadsAndStores(*this, Chain, Dst, Src, ConstantSize->getValue(), Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff); if (Result.Val) @@ -2976,7 +2976,7 @@ SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst, // Then check to see if we should lower the memcpy with target-specific // code. If the target chooses to do this, this is the next best. - SDOperand Result = + SDValue Result = TLI.EmitTargetCodeForMemcpy(*this, Chain, Dst, Src, Size, Align, AlwaysInline, DstSV, DstSVOff, SrcSV, SrcSVOff); @@ -2999,7 +2999,7 @@ SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst, Entry.Node = Dst; Args.push_back(Entry); Entry.Node = Src; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry); - std::pair<SDOperand,SDOperand> CallResult = + std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, false, getExternalSymbol("memcpy", TLI.getPointerTy()), @@ -3007,11 +3007,11 @@ SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst, return CallResult.second; } -SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst, - SDOperand Src, SDOperand Size, - unsigned Align, - const Value *DstSV, uint64_t DstSVOff, - const Value *SrcSV, uint64_t SrcSVOff) { +SDValue SelectionDAG::getMemmove(SDValue Chain, SDValue Dst, + SDValue Src, SDValue Size, + unsigned Align, + const Value *DstSV, uint64_t DstSVOff, + const Value *SrcSV, uint64_t SrcSVOff) { // Check to see if we should lower the memmove to loads and stores first. // For cases within the target-specified limits, this is the best choice. 
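getMemcpy, whose body continues in the next hunk, and its memmove/memset siblings all follow the same three-tier strategy the comments hint at: first try to expand a small constant-size operation into inline loads and stores, then let the target lower it through its TLI hook, and only then fall back to a libcall. Roughly, and with the argument plumbing simplified:

    // Sketch of the tiers for getMemcpy; result/chain handling is abbreviated.
    if (ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size.Val)) {
      SDValue Result =
        getMemcpyLoadsAndStores(*this, Chain, Dst, Src, ConstantSize->getValue(),
                                Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
      if (Result.Val) return Result;              // tier 1: inline loads/stores
    }
    SDValue Result =
      TLI.EmitTargetCodeForMemcpy(*this, Chain, Dst, Src, Size, Align,
                                  AlwaysInline, DstSV, DstSVOff, SrcSV, SrcSVOff);
    if (Result.Val) return Result;                // tier 2: target-specific lowering
    // tier 3: emit a call to the C library's memcpy via TLI.LowerCallTo.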
@@ -3021,7 +3021,7 @@ SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst, if (ConstantSize->isNullValue()) return Chain; - SDOperand Result = + SDValue Result = getMemmoveLoadsAndStores(*this, Chain, Dst, Src, ConstantSize->getValue(), Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff); if (Result.Val) @@ -3030,7 +3030,7 @@ SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst, // Then check to see if we should lower the memmove with target-specific // code. If the target chooses to do this, this is the next best. - SDOperand Result = + SDValue Result = TLI.EmitTargetCodeForMemmove(*this, Chain, Dst, Src, Size, Align, DstSV, DstSVOff, SrcSV, SrcSVOff); if (Result.Val) @@ -3043,7 +3043,7 @@ SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst, Entry.Node = Dst; Args.push_back(Entry); Entry.Node = Src; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry); - std::pair<SDOperand,SDOperand> CallResult = + std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, false, getExternalSymbol("memmove", TLI.getPointerTy()), @@ -3051,10 +3051,10 @@ SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst, return CallResult.second; } -SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst, - SDOperand Src, SDOperand Size, - unsigned Align, - const Value *DstSV, uint64_t DstSVOff) { +SDValue SelectionDAG::getMemset(SDValue Chain, SDValue Dst, + SDValue Src, SDValue Size, + unsigned Align, + const Value *DstSV, uint64_t DstSVOff) { // Check to see if we should lower the memset to stores first. // For cases within the target-specified limits, this is the best choice. @@ -3064,7 +3064,7 @@ SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst, if (ConstantSize->isNullValue()) return Chain; - SDOperand Result = + SDValue Result = getMemsetStores(*this, Chain, Dst, Src, ConstantSize->getValue(), Align, DstSV, DstSVOff); if (Result.Val) @@ -3073,7 +3073,7 @@ SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst, // Then check to see if we should lower the memset with target-specific // code. If the target chooses to do this, this is the next best. 
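For the memset path, getMemsetStores (earlier in this hunk) asks getMemsetValue for a store-sized pattern built from the fill value; conceptually the fill byte is replicated across whatever store type was chosen. The widths below are illustrative only:

    // Illustrative only: filling memory with the byte 0xAB.
    //   i8  store  ->  0xAB
    //   i16 store  ->  0xABAB
    //   i32 store  ->  0xABABABAB
    // getMemsetStores then walks the MemOps list, materializing one such value
    // per store type and chaining the resulting stores together.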
- SDOperand Result = + SDValue Result = TLI.EmitTargetCodeForMemset(*this, Chain, Dst, Src, Size, Align, DstSV, DstSVOff); if (Result.Val) @@ -3094,7 +3094,7 @@ SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst, Args.push_back(Entry); Entry.Node = Size; Entry.Ty = IntPtrTy; Entry.isSExt = false; Args.push_back(Entry); - std::pair<SDOperand,SDOperand> CallResult = + std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, false, getExternalSymbol("memset", TLI.getPointerTy()), @@ -3102,10 +3102,10 @@ SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst, return CallResult.second; } -SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain, - SDOperand Ptr, SDOperand Cmp, - SDOperand Swp, const Value* PtrVal, - unsigned Alignment) { +SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, + SDValue Ptr, SDValue Cmp, + SDValue Swp, const Value* PtrVal, + unsigned Alignment) { assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op"); assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types"); @@ -3116,22 +3116,22 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain, SDVTList VTs = getVTList(VT, MVT::Other); FoldingSetNodeID ID; - SDOperand Ops[] = {Chain, Ptr, Cmp, Swp}; + SDValue Ops[] = {Chain, Ptr, Cmp, Swp}; AddNodeIDNode(ID, Opcode, VTs, Ops, 4); void* IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode* N = getAllocator().Allocate<AtomicSDNode>(); new (N) AtomicSDNode(Opcode, VTs, Chain, Ptr, Cmp, Swp, PtrVal, Alignment); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain, - SDOperand Ptr, SDOperand Val, - const Value* PtrVal, - unsigned Alignment) { +SDValue SelectionDAG::getAtomic(unsigned Opcode, SDValue Chain, + SDValue Ptr, SDValue Val, + const Value* PtrVal, + unsigned Alignment) { assert(( Opcode == ISD::ATOMIC_LOAD_ADD || Opcode == ISD::ATOMIC_LOAD_SUB || Opcode == ISD::ATOMIC_SWAP || Opcode == ISD::ATOMIC_LOAD_AND || Opcode == ISD::ATOMIC_LOAD_OR || Opcode == ISD::ATOMIC_LOAD_XOR @@ -3147,22 +3147,22 @@ SDOperand SelectionDAG::getAtomic(unsigned Opcode, SDOperand Chain, SDVTList VTs = getVTList(VT, MVT::Other); FoldingSetNodeID ID; - SDOperand Ops[] = {Chain, Ptr, Val}; + SDValue Ops[] = {Chain, Ptr, Val}; AddNodeIDNode(ID, Opcode, VTs, Ops, 3); void* IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode* N = getAllocator().Allocate<AtomicSDNode>(); new (N) AtomicSDNode(Opcode, VTs, Chain, Ptr, Val, PtrVal, Alignment); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } /// getMergeValues - Create a MERGE_VALUES node from the given operands. /// Allowed to return something different (and simpler) if Simplify is true. 
-SDOperand SelectionDAG::getMergeValues(const SDOperand *Ops, unsigned NumOps, - bool Simplify) { +SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps, + bool Simplify) { if (Simplify && NumOps == 1) return Ops[0]; @@ -3173,10 +3173,10 @@ SDOperand SelectionDAG::getMergeValues(const SDOperand *Ops, unsigned NumOps, return getNode(ISD::MERGE_VALUES, getVTList(&VTs[0], NumOps), Ops, NumOps); } -SDOperand +SDValue SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, - MVT VT, SDOperand Chain, - SDOperand Ptr, SDOperand Offset, + MVT VT, SDValue Chain, + SDValue Ptr, SDValue Offset, const Value *SV, int SVOffset, MVT EVT, bool isVolatile, unsigned Alignment) { if (Alignment == 0) // Ensure that codegen never sees alignment 0 @@ -3205,7 +3205,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, SDVTList VTs = Indexed ? getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other); - SDOperand Ops[] = { Chain, Ptr, Offset }; + SDValue Ops[] = { Chain, Ptr, Offset }; FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3); ID.AddInteger(AM); @@ -3215,37 +3215,37 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, ID.AddInteger(isVolatile); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<LoadSDNode>(); new (N) LoadSDNode(Ops, VTs, AM, ExtType, EVT, SV, SVOffset, Alignment, isVolatile); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getLoad(MVT VT, - SDOperand Chain, SDOperand Ptr, - const Value *SV, int SVOffset, - bool isVolatile, unsigned Alignment) { - SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType()); +SDValue SelectionDAG::getLoad(MVT VT, + SDValue Chain, SDValue Ptr, + const Value *SV, int SVOffset, + bool isVolatile, unsigned Alignment) { + SDValue Undef = getNode(ISD::UNDEF, Ptr.getValueType()); return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, Chain, Ptr, Undef, SV, SVOffset, VT, isVolatile, Alignment); } -SDOperand SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT VT, - SDOperand Chain, SDOperand Ptr, - const Value *SV, - int SVOffset, MVT EVT, - bool isVolatile, unsigned Alignment) { - SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType()); +SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, MVT VT, + SDValue Chain, SDValue Ptr, + const Value *SV, + int SVOffset, MVT EVT, + bool isVolatile, unsigned Alignment) { + SDValue Undef = getNode(ISD::UNDEF, Ptr.getValueType()); return getLoad(ISD::UNINDEXED, ExtType, VT, Chain, Ptr, Undef, SV, SVOffset, EVT, isVolatile, Alignment); } -SDOperand -SelectionDAG::getIndexedLoad(SDOperand OrigLoad, SDOperand Base, - SDOperand Offset, ISD::MemIndexedMode AM) { +SDValue +SelectionDAG::getIndexedLoad(SDValue OrigLoad, SDValue Base, + SDValue Offset, ISD::MemIndexedMode AM) { LoadSDNode *LD = cast<LoadSDNode>(OrigLoad); assert(LD->getOffset().getOpcode() == ISD::UNDEF && "Load is already a indexed load!"); @@ -3255,17 +3255,17 @@ SelectionDAG::getIndexedLoad(SDOperand OrigLoad, SDOperand Base, LD->isVolatile(), LD->getAlignment()); } -SDOperand SelectionDAG::getStore(SDOperand Chain, SDOperand Val, - SDOperand Ptr, const Value *SV, int SVOffset, - bool isVolatile, unsigned Alignment) { +SDValue SelectionDAG::getStore(SDValue Chain, SDValue Val, + SDValue Ptr, const Value *SV, int SVOffset, + bool isVolatile, unsigned Alignment) { MVT VT = 
Val.getValueType(); if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = getMVTAlignment(VT); SDVTList VTs = getVTList(MVT::Other); - SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType()); - SDOperand Ops[] = { Chain, Val, Ptr, Undef }; + SDValue Undef = getNode(ISD::UNDEF, Ptr.getValueType()); + SDValue Ops[] = { Chain, Val, Ptr, Undef }; FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4); ID.AddInteger(ISD::UNINDEXED); @@ -3275,19 +3275,19 @@ SDOperand SelectionDAG::getStore(SDOperand Chain, SDOperand Val, ID.AddInteger(isVolatile); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<StoreSDNode>(); new (N) StoreSDNode(Ops, VTs, ISD::UNINDEXED, false, VT, SV, SVOffset, Alignment, isVolatile); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getTruncStore(SDOperand Chain, SDOperand Val, - SDOperand Ptr, const Value *SV, - int SVOffset, MVT SVT, - bool isVolatile, unsigned Alignment) { +SDValue SelectionDAG::getTruncStore(SDValue Chain, SDValue Val, + SDValue Ptr, const Value *SV, + int SVOffset, MVT SVT, + bool isVolatile, unsigned Alignment) { MVT VT = Val.getValueType(); if (VT == SVT) @@ -3301,8 +3301,8 @@ SDOperand SelectionDAG::getTruncStore(SDOperand Chain, SDOperand Val, Alignment = getMVTAlignment(VT); SDVTList VTs = getVTList(MVT::Other); - SDOperand Undef = getNode(ISD::UNDEF, Ptr.getValueType()); - SDOperand Ops[] = { Chain, Val, Ptr, Undef }; + SDValue Undef = getNode(ISD::UNDEF, Ptr.getValueType()); + SDValue Ops[] = { Chain, Val, Ptr, Undef }; FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4); ID.AddInteger(ISD::UNINDEXED); @@ -3312,23 +3312,23 @@ SDOperand SelectionDAG::getTruncStore(SDOperand Chain, SDOperand Val, ID.AddInteger(isVolatile); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<StoreSDNode>(); new (N) StoreSDNode(Ops, VTs, ISD::UNINDEXED, true, SVT, SV, SVOffset, Alignment, isVolatile); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand -SelectionDAG::getIndexedStore(SDOperand OrigStore, SDOperand Base, - SDOperand Offset, ISD::MemIndexedMode AM) { +SDValue +SelectionDAG::getIndexedStore(SDValue OrigStore, SDValue Base, + SDValue Offset, ISD::MemIndexedMode AM) { StoreSDNode *ST = cast<StoreSDNode>(OrigStore); assert(ST->getOffset().getOpcode() == ISD::UNDEF && "Store is already a indexed store!"); SDVTList VTs = getVTList(Base.getValueType(), MVT::Other); - SDOperand Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; + SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset }; FoldingSetNodeID ID; AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4); ID.AddInteger(AM); @@ -3338,7 +3338,7 @@ SelectionDAG::getIndexedStore(SDOperand OrigStore, SDOperand Base, ID.AddInteger(ST->isVolatile()); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); SDNode *N = getAllocator().Allocate<StoreSDNode>(); new (N) StoreSDNode(Ops, VTs, AM, ST->isTruncatingStore(), ST->getMemoryVT(), @@ -3346,39 +3346,34 @@ SelectionDAG::getIndexedStore(SDOperand OrigStore, SDOperand Base, ST->getAlignment(), ST->isVolatile()); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand 
SelectionDAG::getVAArg(MVT VT, - SDOperand Chain, SDOperand Ptr, - SDOperand SV) { - SDOperand Ops[] = { Chain, Ptr, SV }; +SDValue SelectionDAG::getVAArg(MVT VT, + SDValue Chain, SDValue Ptr, + SDValue SV) { + SDValue Ops[] = { Chain, Ptr, SV }; return getNode(ISD::VAARG, getVTList(VT, MVT::Other), Ops, 3); } -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, - const SDUse *Ops, unsigned NumOps) { +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT, + const SDUse *Ops, unsigned NumOps) { switch (NumOps) { case 0: return getNode(Opcode, VT); - case 1: return getNode(Opcode, VT, Ops[0].getSDOperand()); - case 2: return getNode(Opcode, VT, Ops[0].getSDOperand(), - Ops[1].getSDOperand()); - case 3: return getNode(Opcode, VT, Ops[0].getSDOperand(), - Ops[1].getSDOperand(), Ops[2].getSDOperand()); + case 1: return getNode(Opcode, VT, Ops[0]); + case 2: return getNode(Opcode, VT, Ops[0], Ops[1]); + case 3: return getNode(Opcode, VT, Ops[0], Ops[1], Ops[2]); default: break; } - // Copy from an SDUse array into an SDOperand array for use with + // Copy from an SDUse array into an SDValue array for use with // the regular getNode logic. - SmallVector<SDOperand, 8> NewOps; - NewOps.reserve(NumOps); - for (unsigned i = 0; i != NumOps; ++i) - NewOps.push_back(Ops[i].getSDOperand()); - return getNode(Opcode, VT, Ops, NumOps); + SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps); + return getNode(Opcode, VT, &NewOps[0], NumOps); } -SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, - const SDOperand *Ops, unsigned NumOps) { +SDValue SelectionDAG::getNode(unsigned Opcode, MVT VT, + const SDValue *Ops, unsigned NumOps) { switch (NumOps) { case 0: return getNode(Opcode, VT); case 1: return getNode(Opcode, VT, Ops[0]); @@ -3415,7 +3410,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); N = getAllocator().Allocate<SDNode>(); new (N) SDNode(Opcode, VTs, Ops, NumOps); CSEMap.InsertNode(N, IP); @@ -3427,26 +3422,26 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, MVT VT, #ifndef NDEBUG VerifyNode(N); #endif - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getNode(unsigned Opcode, - const std::vector<MVT> &ResultTys, - const SDOperand *Ops, unsigned NumOps) { +SDValue SelectionDAG::getNode(unsigned Opcode, + const std::vector<MVT> &ResultTys, + const SDValue *Ops, unsigned NumOps) { return getNode(Opcode, getNodeValueTypes(ResultTys), ResultTys.size(), Ops, NumOps); } -SDOperand SelectionDAG::getNode(unsigned Opcode, - const MVT *VTs, unsigned NumVTs, - const SDOperand *Ops, unsigned NumOps) { +SDValue SelectionDAG::getNode(unsigned Opcode, + const MVT *VTs, unsigned NumVTs, + const SDValue *Ops, unsigned NumOps) { if (NumVTs == 1) return getNode(Opcode, VTs[0], Ops, NumOps); return getNode(Opcode, makeVTList(VTs, NumVTs), Ops, NumOps); } -SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, - const SDOperand *Ops, unsigned NumOps) { +SDValue SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, + const SDValue *Ops, unsigned NumOps) { if (VTList.NumVTs == 1) return getNode(Opcode, VTList.VTs[0], Ops, NumOps); @@ -3480,7 +3475,7 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) - return SDOperand(E, 0); + return SDValue(E, 0); if (NumOps == 1) { N = 
getAllocator().Allocate<UnarySDNode>(); new (N) UnarySDNode(Opcode, VTList, Ops[0]); @@ -3514,42 +3509,42 @@ SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, #ifndef NDEBUG VerifyNode(N); #endif - return SDOperand(N, 0); + return SDValue(N, 0); } -SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList) { +SDValue SelectionDAG::getNode(unsigned Opcode, SDVTList VTList) { return getNode(Opcode, VTList, 0, 0); } -SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, - SDOperand N1) { - SDOperand Ops[] = { N1 }; +SDValue SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, + SDValue N1) { + SDValue Ops[] = { N1 }; return getNode(Opcode, VTList, Ops, 1); } -SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, - SDOperand N1, SDOperand N2) { - SDOperand Ops[] = { N1, N2 }; +SDValue SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, + SDValue N1, SDValue N2) { + SDValue Ops[] = { N1, N2 }; return getNode(Opcode, VTList, Ops, 2); } -SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, - SDOperand N1, SDOperand N2, SDOperand N3) { - SDOperand Ops[] = { N1, N2, N3 }; +SDValue SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, + SDValue N1, SDValue N2, SDValue N3) { + SDValue Ops[] = { N1, N2, N3 }; return getNode(Opcode, VTList, Ops, 3); } -SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, - SDOperand N1, SDOperand N2, SDOperand N3, - SDOperand N4) { - SDOperand Ops[] = { N1, N2, N3, N4 }; +SDValue SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, + SDValue N1, SDValue N2, SDValue N3, + SDValue N4) { + SDValue Ops[] = { N1, N2, N3, N4 }; return getNode(Opcode, VTList, Ops, 4); } -SDOperand SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, - SDOperand N1, SDOperand N2, SDOperand N3, - SDOperand N4, SDOperand N5) { - SDOperand Ops[] = { N1, N2, N3, N4, N5 }; +SDValue SelectionDAG::getNode(unsigned Opcode, SDVTList VTList, + SDValue N1, SDValue N2, SDValue N3, + SDValue N4, SDValue N5) { + SDValue Ops[] = { N1, N2, N3, N4, N5 }; return getNode(Opcode, VTList, Ops, 5); } @@ -3625,8 +3620,7 @@ SDVTList SelectionDAG::getVTList(const MVT *VTs, unsigned NumVTs) { /// already exists. If the resultant node does not exist in the DAG, the /// input node is returned. As a degenerate case, if you specify the same /// input operands as the node already has, the input node is returned. -SDOperand SelectionDAG:: -UpdateNodeOperands(SDOperand InN, SDOperand Op) { +SDValue SelectionDAG::UpdateNodeOperands(SDValue InN, SDValue Op) { SDNode *N = InN.Val; assert(N->getNumOperands() == 1 && "Update with wrong number of operands"); @@ -3636,7 +3630,7 @@ UpdateNodeOperands(SDOperand InN, SDOperand Op) { // See if the modified node already exists. void *InsertPos = 0; if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos)) - return SDOperand(Existing, InN.ResNo); + return SDValue(Existing, InN.ResNo); // Nope it doesn't. Remove the node from its current place in the maps. if (InsertPos) @@ -3653,8 +3647,8 @@ UpdateNodeOperands(SDOperand InN, SDOperand Op) { return InN; } -SDOperand SelectionDAG:: -UpdateNodeOperands(SDOperand InN, SDOperand Op1, SDOperand Op2) { +SDValue SelectionDAG:: +UpdateNodeOperands(SDValue InN, SDValue Op1, SDValue Op2) { SDNode *N = InN.Val; assert(N->getNumOperands() == 2 && "Update with wrong number of operands"); @@ -3665,7 +3659,7 @@ UpdateNodeOperands(SDOperand InN, SDOperand Op1, SDOperand Op2) { // See if the modified node already exists. 
void *InsertPos = 0; if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos)) - return SDOperand(Existing, InN.ResNo); + return SDValue(Existing, InN.ResNo); // Nope it doesn't. Remove the node from its current place in the maps. if (InsertPos) @@ -3690,28 +3684,28 @@ UpdateNodeOperands(SDOperand InN, SDOperand Op1, SDOperand Op2) { return InN; } -SDOperand SelectionDAG:: -UpdateNodeOperands(SDOperand N, SDOperand Op1, SDOperand Op2, SDOperand Op3) { - SDOperand Ops[] = { Op1, Op2, Op3 }; +SDValue SelectionDAG:: +UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, SDValue Op3) { + SDValue Ops[] = { Op1, Op2, Op3 }; return UpdateNodeOperands(N, Ops, 3); } -SDOperand SelectionDAG:: -UpdateNodeOperands(SDOperand N, SDOperand Op1, SDOperand Op2, - SDOperand Op3, SDOperand Op4) { - SDOperand Ops[] = { Op1, Op2, Op3, Op4 }; +SDValue SelectionDAG:: +UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, + SDValue Op3, SDValue Op4) { + SDValue Ops[] = { Op1, Op2, Op3, Op4 }; return UpdateNodeOperands(N, Ops, 4); } -SDOperand SelectionDAG:: -UpdateNodeOperands(SDOperand N, SDOperand Op1, SDOperand Op2, - SDOperand Op3, SDOperand Op4, SDOperand Op5) { - SDOperand Ops[] = { Op1, Op2, Op3, Op4, Op5 }; +SDValue SelectionDAG:: +UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2, + SDValue Op3, SDValue Op4, SDValue Op5) { + SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 }; return UpdateNodeOperands(N, Ops, 5); } -SDOperand SelectionDAG:: -UpdateNodeOperands(SDOperand InN, const SDOperand *Ops, unsigned NumOps) { +SDValue SelectionDAG:: +UpdateNodeOperands(SDValue InN, const SDValue *Ops, unsigned NumOps) { SDNode *N = InN.Val; assert(N->getNumOperands() == NumOps && "Update with wrong number of operands"); @@ -3731,7 +3725,7 @@ UpdateNodeOperands(SDOperand InN, const SDOperand *Ops, unsigned NumOps) { // See if the modified node already exists. void *InsertPos = 0; if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos)) - return SDOperand(Existing, InN.ResNo); + return SDValue(Existing, InN.ResNo); // Nope it doesn't. Remove the node from its current place in the maps. 
if (InsertPos) @@ -3773,37 +3767,37 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, - MVT VT, SDOperand Op1) { + MVT VT, SDValue Op1) { SDVTList VTs = getVTList(VT); - SDOperand Ops[] = { Op1 }; + SDValue Ops[] = { Op1 }; return SelectNodeTo(N, MachineOpc, VTs, Ops, 1); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, - MVT VT, SDOperand Op1, - SDOperand Op2) { + MVT VT, SDValue Op1, + SDValue Op2) { SDVTList VTs = getVTList(VT); - SDOperand Ops[] = { Op1, Op2 }; + SDValue Ops[] = { Op1, Op2 }; return SelectNodeTo(N, MachineOpc, VTs, Ops, 2); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, - MVT VT, SDOperand Op1, - SDOperand Op2, SDOperand Op3) { + MVT VT, SDValue Op1, + SDValue Op2, SDValue Op3) { SDVTList VTs = getVTList(VT); - SDOperand Ops[] = { Op1, Op2, Op3 }; + SDValue Ops[] = { Op1, Op2, Op3 }; return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, - MVT VT, const SDOperand *Ops, + MVT VT, const SDValue *Ops, unsigned NumOps) { SDVTList VTs = getVTList(VT); return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, - MVT VT1, MVT VT2, const SDOperand *Ops, + MVT VT1, MVT VT2, const SDValue *Ops, unsigned NumOps) { SDVTList VTs = getVTList(VT1, VT2); return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); @@ -3812,43 +3806,43 @@ SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, MVT VT1, MVT VT2) { SDVTList VTs = getVTList(VT1, VT2); - return SelectNodeTo(N, MachineOpc, VTs, (SDOperand *)0, 0); + return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, MVT VT1, MVT VT2, MVT VT3, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { SDVTList VTs = getVTList(VT1, VT2, VT3); return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, MVT VT1, MVT VT2, - SDOperand Op1) { + SDValue Op1) { SDVTList VTs = getVTList(VT1, VT2); - SDOperand Ops[] = { Op1 }; + SDValue Ops[] = { Op1 }; return SelectNodeTo(N, MachineOpc, VTs, Ops, 1); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, MVT VT1, MVT VT2, - SDOperand Op1, SDOperand Op2) { + SDValue Op1, SDValue Op2) { SDVTList VTs = getVTList(VT1, VT2); - SDOperand Ops[] = { Op1, Op2 }; + SDValue Ops[] = { Op1, Op2 }; return SelectNodeTo(N, MachineOpc, VTs, Ops, 2); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, MVT VT1, MVT VT2, - SDOperand Op1, SDOperand Op2, - SDOperand Op3) { + SDValue Op1, SDValue Op2, + SDValue Op3) { SDVTList VTs = getVTList(VT1, VT2); - SDOperand Ops[] = { Op1, Op2, Op3 }; + SDValue Ops[] = { Op1, Op2, Op3 }; return SelectNodeTo(N, MachineOpc, VTs, Ops, 3); } SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc, - SDVTList VTs, const SDOperand *Ops, + SDVTList VTs, const SDValue *Ops, unsigned NumOps) { return MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps); } @@ -3860,37 +3854,37 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, - MVT VT, SDOperand Op1) { + MVT VT, SDValue Op1) { SDVTList VTs = getVTList(VT); - SDOperand Ops[] = { Op1 }; + SDValue Ops[] = { Op1 }; return MorphNodeTo(N, Opc, VTs, Ops, 1); } 
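The SelectNodeTo overloads above all funnel into the array-taking form, which hands the opcode to MorphNodeTo as ~MachineOpc; the same bitwise complement shows up in the getTargetNode family further down (getNode(~Opcode, ...)). The complemented value marks the node as carrying an already-selected machine opcode rather than an ISD opcode. A small standalone sketch of that encoding, assuming the stored opcode is a plain signed field (the real node classes carry more state than this):

    #include <cassert>

    // Machine opcodes are stored as the bitwise complement of the real value,
    // so one signed field can hold both ISD opcodes (>= 0) and target opcodes (< 0).
    static int encodeTargetOpcode(unsigned MachineOpc) { return ~int(MachineOpc); }

    static bool isTargetOpcode(int Stored) { return Stored < 0; }

    static unsigned decodeTargetOpcode(int Stored) {
      assert(isTargetOpcode(Stored) && "not a target opcode");
      return unsigned(~Stored);
    }

    int main() {
      int Stored = encodeTargetOpcode(42);
      assert(isTargetOpcode(Stored) && decodeTargetOpcode(Stored) == 42);
      assert(!isTargetOpcode(7));   // ordinary opcodes are stored unchanged
      return 0;
    }
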
SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, - MVT VT, SDOperand Op1, - SDOperand Op2) { + MVT VT, SDValue Op1, + SDValue Op2) { SDVTList VTs = getVTList(VT); - SDOperand Ops[] = { Op1, Op2 }; + SDValue Ops[] = { Op1, Op2 }; return MorphNodeTo(N, Opc, VTs, Ops, 2); } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, - MVT VT, SDOperand Op1, - SDOperand Op2, SDOperand Op3) { + MVT VT, SDValue Op1, + SDValue Op2, SDValue Op3) { SDVTList VTs = getVTList(VT); - SDOperand Ops[] = { Op1, Op2, Op3 }; + SDValue Ops[] = { Op1, Op2, Op3 }; return MorphNodeTo(N, Opc, VTs, Ops, 3); } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, - MVT VT, const SDOperand *Ops, + MVT VT, const SDValue *Ops, unsigned NumOps) { SDVTList VTs = getVTList(VT); return MorphNodeTo(N, Opc, VTs, Ops, NumOps); } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, - MVT VT1, MVT VT2, const SDOperand *Ops, + MVT VT1, MVT VT2, const SDValue *Ops, unsigned NumOps) { SDVTList VTs = getVTList(VT1, VT2); return MorphNodeTo(N, Opc, VTs, Ops, NumOps); @@ -3899,38 +3893,38 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, MVT VT2) { SDVTList VTs = getVTList(VT1, VT2); - return MorphNodeTo(N, Opc, VTs, (SDOperand *)0, 0); + return MorphNodeTo(N, Opc, VTs, (SDValue *)0, 0); } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, MVT VT2, MVT VT3, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { SDVTList VTs = getVTList(VT1, VT2, VT3); return MorphNodeTo(N, Opc, VTs, Ops, NumOps); } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, MVT VT2, - SDOperand Op1) { + SDValue Op1) { SDVTList VTs = getVTList(VT1, VT2); - SDOperand Ops[] = { Op1 }; + SDValue Ops[] = { Op1 }; return MorphNodeTo(N, Opc, VTs, Ops, 1); } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, MVT VT2, - SDOperand Op1, SDOperand Op2) { + SDValue Op1, SDValue Op2) { SDVTList VTs = getVTList(VT1, VT2); - SDOperand Ops[] = { Op1, Op2 }; + SDValue Ops[] = { Op1, Op2 }; return MorphNodeTo(N, Opc, VTs, Ops, 2); } SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, MVT VT2, - SDOperand Op1, SDOperand Op2, - SDOperand Op3) { + SDValue Op1, SDValue Op2, + SDValue Op3) { SDVTList VTs = getVTList(VT1, VT2); - SDOperand Ops[] = { Op1, Op2, Op3 }; + SDValue Ops[] = { Op1, Op2, Op3 }; return MorphNodeTo(N, Opc, VTs, Ops, 3); } @@ -3947,7 +3941,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, /// the node's users. /// SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, - SDVTList VTs, const SDOperand *Ops, + SDVTList VTs, const SDValue *Ops, unsigned NumOps) { // If an identical node already exists, use it. 
void *IP = 0; @@ -4027,72 +4021,72 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc, SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT) { return getNode(~Opcode, VT).Val; } -SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, SDOperand Op1) { +SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, SDValue Op1) { return getNode(~Opcode, VT, Op1).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, - SDOperand Op1, SDOperand Op2) { + SDValue Op1, SDValue Op2) { return getNode(~Opcode, VT, Op1, Op2).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, - SDOperand Op1, SDOperand Op2, - SDOperand Op3) { + SDValue Op1, SDValue Op2, + SDValue Op3) { return getNode(~Opcode, VT, Op1, Op2, Op3).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { return getNode(~Opcode, VT, Ops, NumOps).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2) { const MVT *VTs = getNodeValueTypes(VT1, VT2); - SDOperand Op; + SDValue Op; return getNode(~Opcode, VTs, 2, &Op, 0).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, - MVT VT2, SDOperand Op1) { + MVT VT2, SDValue Op1) { const MVT *VTs = getNodeValueTypes(VT1, VT2); return getNode(~Opcode, VTs, 2, &Op1, 1).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, - MVT VT2, SDOperand Op1, - SDOperand Op2) { + MVT VT2, SDValue Op1, + SDValue Op2) { const MVT *VTs = getNodeValueTypes(VT1, VT2); - SDOperand Ops[] = { Op1, Op2 }; + SDValue Ops[] = { Op1, Op2 }; return getNode(~Opcode, VTs, 2, Ops, 2).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, - MVT VT2, SDOperand Op1, - SDOperand Op2, SDOperand Op3) { + MVT VT2, SDValue Op1, + SDValue Op2, SDValue Op3) { const MVT *VTs = getNodeValueTypes(VT1, VT2); - SDOperand Ops[] = { Op1, Op2, Op3 }; + SDValue Ops[] = { Op1, Op2, Op3 }; return getNode(~Opcode, VTs, 2, Ops, 3).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { const MVT *VTs = getNodeValueTypes(VT1, VT2); return getNode(~Opcode, VTs, 2, Ops, NumOps).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, - SDOperand Op1, SDOperand Op2) { + SDValue Op1, SDValue Op2) { const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3); - SDOperand Ops[] = { Op1, Op2 }; + SDValue Ops[] = { Op1, Op2 }; return getNode(~Opcode, VTs, 3, Ops, 2).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, - SDOperand Op1, SDOperand Op2, - SDOperand Op3) { + SDValue Op1, SDValue Op2, + SDValue Op3) { const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3); - SDOperand Ops[] = { Op1, Op2, Op3 }; + SDValue Ops[] = { Op1, Op2, Op3 }; return getNode(~Opcode, VTs, 3, Ops, 3).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { const MVT *VTs = getNodeValueTypes(VT1, VT2, VT3); return getNode(~Opcode, VTs, 3, Ops, NumOps).Val; } SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, MVT VT2, MVT VT3, MVT VT4, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { std::vector<MVT> VTList; VTList.push_back(VT1); VTList.push_back(VT2); @@ -4103,7 +4097,7 @@ SDNode *SelectionDAG::getTargetNode(unsigned Opcode, MVT VT1, } SDNode 
*SelectionDAG::getTargetNode(unsigned Opcode, const std::vector<MVT> &ResultTys, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { const MVT *VTs = getNodeValueTypes(ResultTys); return getNode(~Opcode, VTs, ResultTys.size(), Ops, NumOps).Val; @@ -4112,7 +4106,7 @@ SDNode *SelectionDAG::getTargetNode(unsigned Opcode, /// getNodeIfExists - Get the specified node if it's already available, or /// else return NULL. SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, - const SDOperand *Ops, unsigned NumOps) { + const SDValue *Ops, unsigned NumOps) { if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) { FoldingSetNodeID ID; AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps); @@ -4129,7 +4123,7 @@ SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList, /// /// This version assumes From has a single result value. /// -void SelectionDAG::ReplaceAllUsesWith(SDOperand FromN, SDOperand To, +void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To, DAGUpdateListener *UpdateListener) { SDNode *From = FromN.Val; assert(From->getNumValues() == 1 && FromN.ResNo == 0 && @@ -4223,10 +4217,10 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To, /// This version can replace From with any result values. To must match the /// number and types of values returned by From. void SelectionDAG::ReplaceAllUsesWith(SDNode *From, - const SDOperand *To, + const SDValue *To, DAGUpdateListener *UpdateListener) { if (From->getNumValues() == 1) // Handle the simple case efficiently. - return ReplaceAllUsesWith(SDOperand(From, 0), To[0], UpdateListener); + return ReplaceAllUsesWith(SDValue(From, 0), To[0], UpdateListener); while (!From->use_empty()) { SDNode::use_iterator UI = From->use_begin(); @@ -4238,7 +4232,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From, for (SDNode::op_iterator I = U->op_begin(), E = U->op_end(); I != E; ++I, ++operandNum) if (I->getVal() == From) { - const SDOperand &ToOp = To[I->getSDOperand().ResNo]; + const SDValue &ToOp = To[I->getSDValue().ResNo]; From->removeUser(operandNum, U); *I = ToOp; I->setUser(U); @@ -4265,7 +4259,7 @@ void SelectionDAG::ReplaceAllUsesWith(SDNode *From, /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving /// uses of other values produced by From.Val alone. The Deleted vector is /// handled the same way as for ReplaceAllUsesWith. -void SelectionDAG::ReplaceAllUsesOfValueWith(SDOperand From, SDOperand To, +void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To, DAGUpdateListener *UpdateListener){ // Handle the really simple, really trivial case efficiently. if (From == To) return; @@ -4331,8 +4325,8 @@ void SelectionDAG::ReplaceAllUsesOfValueWith(SDOperand From, SDOperand To, /// uses of other values produced by From.Val alone. The same value may /// appear in both the From and To list. The Deleted vector is /// handled the same way as for ReplaceAllUsesWith. -void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDOperand *From, - const SDOperand *To, +void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From, + const SDValue *To, unsigned Num, DAGUpdateListener *UpdateListener){ // Handle the simple, trivial case efficiently. 
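ReplaceAllUsesWith and its ReplaceAllUsesOfValueWith/ReplaceAllUsesOfValuesWith variants above all follow the same core loop: walk the use list of the old value, rewrite the matching operands of each user, and migrate that user onto the new value's use list. A toy, self-contained model of that loop, with made-up ToyNode/replaceAllUsesWith names rather than the real SDNode machinery:

    #include <cassert>
    #include <vector>

    // Each node records which nodes use it; redirecting a value rewrites the
    // matching operands of every user and moves the user to the new use list.
    struct ToyNode {
      std::vector<ToyNode*> Operands;
      std::vector<ToyNode*> Users;
    };

    static void replaceAllUsesWith(ToyNode *From, ToyNode *To) {
      while (!From->Users.empty()) {
        ToyNode *U = From->Users.back();
        From->Users.pop_back();
        for (ToyNode *&Op : U->Operands)
          if (Op == From)
            Op = To;                 // point the operand at the replacement
        To->Users.push_back(U);      // U now appears on To's use list
      }
    }

    int main() {
      ToyNode A, B, Use;
      Use.Operands = { &A, &A };
      A.Users = { &Use };
      replaceAllUsesWith(&A, &B);
      assert(Use.Operands[0] == &B && Use.Operands[1] == &B);
      assert(A.Users.empty() && B.Users.size() == 1);
      return 0;
    }

The real routines additionally notify the DAGUpdateListener seen in the signatures above and keep the CSE maps consistent, which this sketch leaves out.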
@@ -4549,7 +4543,7 @@ bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const { // TODO: Only iterate over uses of a given value of the node for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) { - if (UI.getUse().getSDOperand().ResNo == Value) { + if (UI.getUse().getSDValue().ResNo == Value) { if (NUses == 0) return false; --NUses; @@ -4567,7 +4561,7 @@ bool SDNode::hasAnyUseOfValue(unsigned Value) const { assert(Value < getNumValues() && "Bad value!"); for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) - if (UI.getUse().getSDOperand().ResNo == Value) + if (UI.getUse().getSDValue().ResNo == Value) return true; return false; @@ -4591,7 +4585,7 @@ bool SDNode::isOnlyUserOf(SDNode *N) const { /// isOperand - Return true if this node is an operand of N. /// -bool SDOperand::isOperandOf(SDNode *N) const { +bool SDValue::isOperandOf(SDNode *N) const { for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) if (*this == N->getOperand(i)) return true; @@ -4610,7 +4604,7 @@ bool SDNode::isOperandOf(SDNode *N) const { /// side-effecting instructions. In practice, this looks through token /// factors and non-volatile loads. In order to remain efficient, this only /// looks a couple of nodes in, it does not do an exhaustive search. -bool SDOperand::reachesChainWithoutSideEffects(SDOperand Dest, +bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, unsigned Depth) const { if (*this == Dest) return true; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index ac9e399e9d..7d34ca2744 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -261,21 +261,21 @@ namespace { /// this value and returns the result as a ValueVTs value. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. - SDOperand getCopyFromRegs(SelectionDAG &DAG, - SDOperand &Chain, SDOperand *Flag) const; + SDValue getCopyFromRegs(SelectionDAG &DAG, + SDValue &Chain, SDValue *Flag) const; /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the /// specified value into the registers specified by this object. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. - void getCopyToRegs(SDOperand Val, SelectionDAG &DAG, - SDOperand &Chain, SDOperand *Flag) const; + void getCopyToRegs(SDValue Val, SelectionDAG &DAG, + SDValue &Chain, SDValue *Flag) const; /// AddInlineAsmOperands - Add this value to the specified inlineasm node /// operand list. This adds the code marker and includes the number of /// values added into it. void AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, - std::vector<SDOperand> &Ops) const; + std::vector<SDValue> &Ops) const; }; } @@ -489,20 +489,20 @@ namespace llvm { class SelectionDAGLowering { MachineBasicBlock *CurMBB; - DenseMap<const Value*, SDOperand> NodeMap; + DenseMap<const Value*, SDValue> NodeMap; /// PendingLoads - Loads are not emitted to the program immediately. We bunch /// them up and then emit token factor nodes when possible. This allows us to /// get simple disambiguation between loads without worrying about alias /// analysis. 
- SmallVector<SDOperand, 8> PendingLoads; + SmallVector<SDValue, 8> PendingLoads; /// PendingExports - CopyToReg nodes that copy values to virtual registers /// for export to other blocks need to be emitted before any terminator /// instruction, but they have no other ordering requirements. We bunch them /// up and the emit a single tokenfactor for them just before terminator /// instructions. - std::vector<SDOperand> PendingExports; + std::vector<SDValue> PendingExports; /// Case - A struct to record the Value for a switch case, and the /// case's target basic block. @@ -610,19 +610,19 @@ public: /// a store or any other node that may need to be ordered after any /// prior load instructions. /// - SDOperand getRoot() { + SDValue getRoot() { if (PendingLoads.empty()) return DAG.getRoot(); if (PendingLoads.size() == 1) { - SDOperand Root = PendingLoads[0]; + SDValue Root = PendingLoads[0]; DAG.setRoot(Root); PendingLoads.clear(); return Root; } // Otherwise, we have to make a token factor node. - SDOperand Root = DAG.getNode(ISD::TokenFactor, MVT::Other, + SDValue Root = DAG.getNode(ISD::TokenFactor, MVT::Other, &PendingLoads[0], PendingLoads.size()); PendingLoads.clear(); DAG.setRoot(Root); @@ -633,8 +633,8 @@ public: /// PendingLoad items, flush all the PendingExports items. It is necessary /// to do this before emitting a terminator instruction. /// - SDOperand getControlRoot() { - SDOperand Root = DAG.getRoot(); + SDValue getControlRoot() { + SDValue Root = DAG.getRoot(); if (PendingExports.empty()) return Root; @@ -679,10 +679,10 @@ public: void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; } - SDOperand getValue(const Value *V); + SDValue getValue(const Value *V); - void setValue(const Value *V, SDOperand NewN) { - SDOperand &N = NodeMap[V]; + void setValue(const Value *V, SDValue NewN) { + SDValue &N = NodeMap[V]; assert(N.Val == 0 && "Already set a value for this node!"); N = NewN; } @@ -696,7 +696,7 @@ public: unsigned Opc); bool isExportableFromCurrentBlock(Value *V, const BasicBlock *FromBB); void ExportFromCurrentBlock(Value *V); - void LowerCallTo(CallSite CS, SDOperand Callee, bool IsTailCall, + void LowerCallTo(CallSite CS, SDValue Callee, bool IsTailCall, MachineBasicBlock *LandingPad = NULL); // Terminator instructions. @@ -827,15 +827,15 @@ private: /// larger then ValueVT then AssertOp can be used to specify whether the extra /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT /// (ISD::AssertSext). -static SDOperand getCopyFromParts(SelectionDAG &DAG, - const SDOperand *Parts, +static SDValue getCopyFromParts(SelectionDAG &DAG, + const SDValue *Parts, unsigned NumParts, MVT PartVT, MVT ValueVT, ISD::NodeType AssertOp = ISD::DELETED_NODE) { assert(NumParts > 0 && "No parts to assemble!"); TargetLowering &TLI = DAG.getTargetLoweringInfo(); - SDOperand Val = Parts[0]; + SDValue Val = Parts[0]; if (NumParts > 1) { // Assemble the value from multiple parts. @@ -849,7 +849,7 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, unsigned RoundBits = PartBits * RoundParts; MVT RoundVT = RoundBits == ValueBits ? ValueVT : MVT::getIntegerVT(RoundBits); - SDOperand Lo, Hi; + SDValue Lo, Hi; if (RoundParts > 2) { MVT HalfVT = MVT::getIntegerVT(RoundBits/2); @@ -896,7 +896,7 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, "Part type doesn't match part!"); // Assemble the parts into intermediate operands. 
- SmallVector<SDOperand, 8> Ops(NumIntermediates); + SmallVector<SDValue, 8> Ops(NumIntermediates); if (NumIntermediates == NumParts) { // If the register was not expanded, truncate or copy the value, // as appropriate. @@ -967,15 +967,15 @@ static SDOperand getCopyFromParts(SelectionDAG &DAG, return DAG.getNode(ISD::BIT_CONVERT, ValueVT, Val); assert(0 && "Unknown mismatch!"); - return SDOperand(); + return SDValue(); } /// getCopyToParts - Create a series of nodes that contain the specified value /// split into legal parts. If the parts contain more bits than Val, then, for /// integers, ExtendKind can be used to specify how to generate the extra bits. static void getCopyToParts(SelectionDAG &DAG, - SDOperand Val, - SDOperand *Parts, + SDValue Val, + SDValue *Parts, unsigned NumParts, MVT PartVT, ISD::NodeType ExtendKind = ISD::ANY_EXTEND) { @@ -1039,7 +1039,7 @@ static void getCopyToParts(SelectionDAG &DAG, unsigned RoundParts = 1 << Log2_32(NumParts); unsigned RoundBits = RoundParts * PartBits; unsigned OddParts = NumParts - RoundParts; - SDOperand OddVal = DAG.getNode(ISD::SRL, ValueVT, Val, + SDValue OddVal = DAG.getNode(ISD::SRL, ValueVT, Val, DAG.getConstant(RoundBits, TLI.getShiftAmountTy())); getCopyToParts(DAG, OddVal, Parts + RoundParts, OddParts, PartVT); @@ -1060,8 +1060,8 @@ static void getCopyToParts(SelectionDAG &DAG, for (unsigned i = 0; i < NumParts; i += StepSize) { unsigned ThisBits = StepSize * PartBits / 2; MVT ThisVT = MVT::getIntegerVT (ThisBits); - SDOperand &Part0 = Parts[i]; - SDOperand &Part1 = Parts[i+StepSize/2]; + SDValue &Part0 = Parts[i]; + SDValue &Part1 = Parts[i+StepSize/2]; Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, ThisVT, Part0, DAG.getConstant(1, PtrVT)); @@ -1113,7 +1113,7 @@ static void getCopyToParts(SelectionDAG &DAG, assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!"); // Split the vector into intermediate operands. 
- SmallVector<SDOperand, 8> Ops(NumIntermediates); + SmallVector<SDValue, 8> Ops(NumIntermediates); for (unsigned i = 0; i != NumIntermediates; ++i) if (IntermediateVT.isVector()) Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, @@ -1143,8 +1143,8 @@ static void getCopyToParts(SelectionDAG &DAG, } -SDOperand SelectionDAGLowering::getValue(const Value *V) { - SDOperand &N = NodeMap[V]; +SDValue SelectionDAGLowering::getValue(const Value *V) { + SDValue &N = NodeMap[V]; if (N.Val) return N; if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) { @@ -1168,18 +1168,18 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { visit(CE->getOpcode(), *CE); - SDOperand N1 = NodeMap[V]; + SDValue N1 = NodeMap[V]; assert(N1.Val && "visit didn't populate the ValueMap!"); return N1; } if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) { - SmallVector<SDOperand, 4> Constants; + SmallVector<SDValue, 4> Constants; for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end(); OI != OE; ++OI) { SDNode *Val = getValue(*OI).Val; for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i) - Constants.push_back(SDOperand(Val, i)); + Constants.push_back(SDValue(Val, i)); } return DAG.getMergeValues(&Constants[0], Constants.size()); } @@ -1189,9 +1189,9 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { "Unknown array constant!"); unsigned NumElts = ATy->getNumElements(); if (NumElts == 0) - return SDOperand(); // empty array + return SDValue(); // empty array MVT EltVT = TLI.getValueType(ATy->getElementType()); - SmallVector<SDOperand, 4> Constants(NumElts); + SmallVector<SDValue, 4> Constants(NumElts); for (unsigned i = 0, e = NumElts; i != e; ++i) { if (isa<UndefValue>(C)) Constants[i] = DAG.getNode(ISD::UNDEF, EltVT); @@ -1208,8 +1208,8 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { "Unknown struct constant!"); unsigned NumElts = STy->getNumElements(); if (NumElts == 0) - return SDOperand(); // empty struct - SmallVector<SDOperand, 4> Constants(NumElts); + return SDValue(); // empty struct + SmallVector<SDValue, 4> Constants(NumElts); for (unsigned i = 0, e = NumElts; i != e; ++i) { MVT EltVT = TLI.getValueType(STy->getElementType(i)); if (isa<UndefValue>(C)) @@ -1227,7 +1227,7 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { // Now that we know the number and type of the elements, get that number of // elements into the Ops array based on what kind of constant it is. 
- SmallVector<SDOperand, 16> Ops; + SmallVector<SDValue, 16> Ops; if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) { for (unsigned i = 0; i != NumElements; ++i) Ops.push_back(getValue(CP->getOperand(i))); @@ -1236,7 +1236,7 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { "Unknown vector constant!"); MVT EltVT = TLI.getValueType(VecTy->getElementType()); - SDOperand Op; + SDValue Op; if (isa<UndefValue>(C)) Op = DAG.getNode(ISD::UNDEF, EltVT); else if (EltVT.isFloatingPoint()) @@ -1263,7 +1263,7 @@ SDOperand SelectionDAGLowering::getValue(const Value *V) { assert(InReg && "Value not in map!"); RegsForValue RFV(TLI, InReg, V->getType()); - SDOperand Chain = DAG.getEntryNode(); + SDValue Chain = DAG.getEntryNode(); return RFV.getCopyFromRegs(DAG, Chain, NULL); } @@ -1274,10 +1274,10 @@ void SelectionDAGLowering::visitRet(ReturnInst &I) { return; } - SmallVector<SDOperand, 8> NewValues; + SmallVector<SDValue, 8> NewValues; NewValues.push_back(getControlRoot()); for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) { - SDOperand RetOp = getValue(I.getOperand(i)); + SDValue RetOp = getValue(I.getOperand(i)); SmallVector<MVT, 4> ValueVTs; ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs); @@ -1294,7 +1294,7 @@ void SelectionDAGLowering::visitRet(ReturnInst &I) { unsigned NumParts = TLI.getNumRegisters(VT); MVT PartVT = TLI.getRegisterType(VT); - SmallVector<SDOperand, 4> Parts(NumParts); + SmallVector<SDValue, 4> Parts(NumParts); ISD::NodeType ExtendKind = ISD::ANY_EXTEND; const Function *F = I.getParent()->getParent(); @@ -1303,7 +1303,7 @@ void SelectionDAGLowering::visitRet(ReturnInst &I) { else if (F->paramHasAttr(0, ParamAttr::ZExt)) ExtendKind = ISD::ZERO_EXTEND; - getCopyToParts(DAG, SDOperand(RetOp.Val, RetOp.ResNo + j), + getCopyToParts(DAG, SDValue(RetOp.Val, RetOp.ResNo + j), &Parts[0], NumParts, PartVT, ExtendKind); for (unsigned i = 0; i < NumParts; ++i) { @@ -1590,8 +1590,8 @@ void SelectionDAGLowering::visitBr(BranchInst &I) { /// visitSwitchCase - Emits the necessary code to represent a single node in /// the binary search tree resulting from lowering a switch instruction. void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { - SDOperand Cond; - SDOperand CondLHS = getValue(CB.CmpLHS); + SDValue Cond; + SDValue CondLHS = getValue(CB.CmpLHS); // Build the setcc now. 
if (CB.CmpMHS == NULL) { @@ -1600,7 +1600,7 @@ void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ) Cond = CondLHS; else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) { - SDOperand True = DAG.getConstant(1, CondLHS.getValueType()); + SDValue True = DAG.getConstant(1, CondLHS.getValueType()); Cond = DAG.getNode(ISD::XOR, CondLHS.getValueType(), CondLHS, True); } else Cond = DAG.getSetCC(MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC); @@ -1610,13 +1610,13 @@ void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { uint64_t Low = cast<ConstantInt>(CB.CmpLHS)->getSExtValue(); uint64_t High = cast<ConstantInt>(CB.CmpRHS)->getSExtValue(); - SDOperand CmpOp = getValue(CB.CmpMHS); + SDValue CmpOp = getValue(CB.CmpMHS); MVT VT = CmpOp.getValueType(); if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) { Cond = DAG.getSetCC(MVT::i1, CmpOp, DAG.getConstant(High, VT), ISD::SETLE); } else { - SDOperand SUB = DAG.getNode(ISD::SUB, VT, CmpOp, DAG.getConstant(Low, VT)); + SDValue SUB = DAG.getNode(ISD::SUB, VT, CmpOp, DAG.getConstant(Low, VT)); Cond = DAG.getSetCC(MVT::i1, SUB, DAG.getConstant(High-Low, VT), ISD::SETULE); } @@ -1637,10 +1637,10 @@ void SelectionDAGLowering::visitSwitchCase(SelectionDAGISel::CaseBlock &CB) { // fall through to the lhs instead of the rhs block. if (CB.TrueBB == NextBlock) { std::swap(CB.TrueBB, CB.FalseBB); - SDOperand True = DAG.getConstant(1, Cond.getValueType()); + SDValue True = DAG.getConstant(1, Cond.getValueType()); Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True); } - SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), Cond, + SDValue BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), Cond, DAG.getBasicBlock(CB.TrueBB)); if (CB.FalseBB == NextBlock) DAG.setRoot(BrCond); @@ -1654,8 +1654,8 @@ void SelectionDAGLowering::visitJumpTable(SelectionDAGISel::JumpTable &JT) { // Emit the code for the jump table assert(JT.Reg != -1U && "Should lower JT Header first!"); MVT PTy = TLI.getPointerTy(); - SDOperand Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy); - SDOperand Table = DAG.getJumpTable(JT.JTI, PTy); + SDValue Index = DAG.getCopyFromReg(getControlRoot(), JT.Reg, PTy); + SDValue Table = DAG.getJumpTable(JT.JTI, PTy); DAG.setRoot(DAG.getNode(ISD::BR_JT, MVT::Other, Index.getValue(1), Table, Index)); return; @@ -1668,9 +1668,9 @@ void SelectionDAGLowering::visitJumpTableHeader(SelectionDAGISel::JumpTable &JT, // Subtract the lowest switch case value from the value being switched on // and conditional branch to default mbb if the result is greater than the // difference between smallest and largest cases. 
- SDOperand SwitchOp = getValue(JTH.SValue); + SDValue SwitchOp = getValue(JTH.SValue); MVT VT = SwitchOp.getValueType(); - SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, + SDValue SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, DAG.getConstant(JTH.First, VT)); // The SDNode we just created, which holds the value being switched on @@ -1684,13 +1684,13 @@ void SelectionDAGLowering::visitJumpTableHeader(SelectionDAGISel::JumpTable &JT, SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), SUB); unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy()); - SDOperand CopyTo = DAG.getCopyToReg(getControlRoot(), JumpTableReg, SwitchOp); + SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), JumpTableReg, SwitchOp); JT.Reg = JumpTableReg; // Emit the range check for the jump table, and branch to the default // block for the switch statement if the value being switched on exceeds // the largest case in the switch. - SDOperand CMP = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB, + SDValue CMP = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB, DAG.getConstant(JTH.Last-JTH.First,VT), ISD::SETUGT); @@ -1701,7 +1701,7 @@ void SelectionDAGLowering::visitJumpTableHeader(SelectionDAGISel::JumpTable &JT, if (++BBI != CurMBB->getParent()->end()) NextBlock = BBI; - SDOperand BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP, + SDValue BrCond = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, CMP, DAG.getBasicBlock(JT.Default)); if (JT.MBB == NextBlock) @@ -1717,29 +1717,29 @@ void SelectionDAGLowering::visitJumpTableHeader(SelectionDAGISel::JumpTable &JT, /// suitable for "bit tests" void SelectionDAGLowering::visitBitTestHeader(SelectionDAGISel::BitTestBlock &B) { // Subtract the minimum value - SDOperand SwitchOp = getValue(B.SValue); + SDValue SwitchOp = getValue(B.SValue); MVT VT = SwitchOp.getValueType(); - SDOperand SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, + SDValue SUB = DAG.getNode(ISD::SUB, VT, SwitchOp, DAG.getConstant(B.First, VT)); // Check range - SDOperand RangeCmp = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB, + SDValue RangeCmp = DAG.getSetCC(TLI.getSetCCResultType(SUB), SUB, DAG.getConstant(B.Range, VT), ISD::SETUGT); - SDOperand ShiftOp; + SDValue ShiftOp; if (VT.bitsGT(TLI.getShiftAmountTy())) ShiftOp = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), SUB); else ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, TLI.getShiftAmountTy(), SUB); // Make desired shift - SDOperand SwitchVal = DAG.getNode(ISD::SHL, TLI.getPointerTy(), + SDValue SwitchVal = DAG.getNode(ISD::SHL, TLI.getPointerTy(), DAG.getConstant(1, TLI.getPointerTy()), ShiftOp); unsigned SwitchReg = FuncInfo.MakeReg(TLI.getPointerTy()); - SDOperand CopyTo = DAG.getCopyToReg(getControlRoot(), SwitchReg, SwitchVal); + SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), SwitchReg, SwitchVal); B.Reg = SwitchReg; // Set NextBlock to be the MBB immediately after the current one, if any. 
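visitJumpTableHeader above rebases the switch value (SUB = SwitchOp - JTH.First), zero-extends it to pointer width, and branches to the default block when it exceeds JTH.Last - JTH.First; only in-range values reach the BR_JT emitted by visitJumpTable. A standalone sketch of that range-check-then-index scheme, with an invented dispatch() helper and block numbers purely for illustration:

    #include <cassert>
    #include <cstdint>

    // Rebase the switch value, range-check it against the span of cases, and
    // only then index the jump table; everything else goes to the default block.
    static int dispatch(uint64_t X, uint64_t First, uint64_t Last,
                        const int *Table, int DefaultBB) {
      uint64_t Index = X - First;        // values below First wrap and fail the check
      if (Index > Last - First)
        return DefaultBB;                // the BRCOND to the default block
      return Table[Index];               // the BR_JT through the jump table
    }

    int main() {
      const int Table[] = { 1, 2, 3, 4 };          // cases 10..13 -> blocks 1..4
      assert(dispatch(12, 10, 13, Table, 0) == 3);
      assert(dispatch(9,  10, 13, Table, 0) == 0); // below the case range
      assert(dispatch(14, 10, 13, Table, 0) == 0); // above the case range
      return 0;
    }

visitBitTestHeader (above, continuing just below) uses the same subtract-and-range-check prologue, but then shifts a single bit into position and tests it against each cluster's mask instead of indexing a table.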
@@ -1754,7 +1754,7 @@ void SelectionDAGLowering::visitBitTestHeader(SelectionDAGISel::BitTestBlock &B) CurMBB->addSuccessor(B.Default); CurMBB->addSuccessor(MBB); - SDOperand BrRange = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, RangeCmp, + SDValue BrRange = DAG.getNode(ISD::BRCOND, MVT::Other, CopyTo, RangeCmp, DAG.getBasicBlock(B.Default)); if (MBB == NextBlock) @@ -1771,19 +1771,19 @@ void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB, unsigned Reg, SelectionDAGISel::BitTestCase &B) { // Emit bit tests and jumps - SDOperand SwitchVal = DAG.getCopyFromReg(getControlRoot(), Reg, + SDValue SwitchVal = DAG.getCopyFromReg(getControlRoot(), Reg, TLI.getPointerTy()); - SDOperand AndOp = DAG.getNode(ISD::AND, TLI.getPointerTy(), SwitchVal, + SDValue AndOp = DAG.getNode(ISD::AND, TLI.getPointerTy(), SwitchVal, DAG.getConstant(B.Mask, TLI.getPointerTy())); - SDOperand AndCmp = DAG.getSetCC(TLI.getSetCCResultType(AndOp), AndOp, + SDValue AndCmp = DAG.getSetCC(TLI.getSetCCResultType(AndOp), AndOp, DAG.getConstant(0, TLI.getPointerTy()), ISD::SETNE); CurMBB->addSuccessor(B.TargetBB); CurMBB->addSuccessor(NextMBB); - SDOperand BrAnd = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), + SDValue BrAnd = DAG.getNode(ISD::BRCOND, MVT::Other, getControlRoot(), AndCmp, DAG.getBasicBlock(B.TargetBB)); // Set NextBlock to be the MBB immediately after the current one, if any. @@ -2378,7 +2378,7 @@ void SelectionDAGLowering::visitSub(User &I) { std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy)); Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size()); if (CV == CNZ) { - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op2 = getValue(I.getOperand(1)); setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2)); return; } @@ -2388,7 +2388,7 @@ void SelectionDAGLowering::visitSub(User &I) { if (Ty->isFloatingPoint()) { if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) { - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op2 = getValue(I.getOperand(1)); setValue(&I, DAG.getNode(ISD::FNEG, Op2.getValueType(), Op2)); return; } @@ -2398,15 +2398,15 @@ void SelectionDAGLowering::visitSub(User &I) { } void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) { - SDOperand Op1 = getValue(I.getOperand(0)); - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op1 = getValue(I.getOperand(0)); + SDValue Op2 = getValue(I.getOperand(1)); setValue(&I, DAG.getNode(OpCode, Op1.getValueType(), Op1, Op2)); } void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) { - SDOperand Op1 = getValue(I.getOperand(0)); - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op1 = getValue(I.getOperand(0)); + SDValue Op2 = getValue(I.getOperand(1)); if (TLI.getShiftAmountTy().bitsLT(Op2.getValueType())) Op2 = DAG.getNode(ISD::TRUNCATE, TLI.getShiftAmountTy(), Op2); @@ -2422,8 +2422,8 @@ void SelectionDAGLowering::visitICmp(User &I) { predicate = IC->getPredicate(); else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I)) predicate = ICmpInst::Predicate(IC->getPredicate()); - SDOperand Op1 = getValue(I.getOperand(0)); - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op1 = getValue(I.getOperand(0)); + SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Opcode; switch (predicate) { case ICmpInst::ICMP_EQ : Opcode = ISD::SETEQ; break; @@ -2450,8 +2450,8 @@ void SelectionDAGLowering::visitFCmp(User &I) { predicate = FC->getPredicate(); else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) 
predicate = FCmpInst::Predicate(FC->getPredicate()); - SDOperand Op1 = getValue(I.getOperand(0)); - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op1 = getValue(I.getOperand(0)); + SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Condition, FOC, FPC; switch (predicate) { case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break; @@ -2488,8 +2488,8 @@ void SelectionDAGLowering::visitVICmp(User &I) { predicate = IC->getPredicate(); else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I)) predicate = ICmpInst::Predicate(IC->getPredicate()); - SDOperand Op1 = getValue(I.getOperand(0)); - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op1 = getValue(I.getOperand(0)); + SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Opcode; switch (predicate) { case ICmpInst::ICMP_EQ : Opcode = ISD::SETEQ; break; @@ -2516,8 +2516,8 @@ void SelectionDAGLowering::visitVFCmp(User &I) { predicate = FC->getPredicate(); else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) predicate = FCmpInst::Predicate(FC->getPredicate()); - SDOperand Op1 = getValue(I.getOperand(0)); - SDOperand Op2 = getValue(I.getOperand(1)); + SDValue Op1 = getValue(I.getOperand(0)); + SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Condition, FOC, FPC; switch (predicate) { case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break; @@ -2552,9 +2552,9 @@ void SelectionDAGLowering::visitVFCmp(User &I) { } void SelectionDAGLowering::visitSelect(User &I) { - SDOperand Cond = getValue(I.getOperand(0)); - SDOperand TrueVal = getValue(I.getOperand(1)); - SDOperand FalseVal = getValue(I.getOperand(2)); + SDValue Cond = getValue(I.getOperand(0)); + SDValue TrueVal = getValue(I.getOperand(1)); + SDValue FalseVal = getValue(I.getOperand(2)); setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond, TrueVal, FalseVal)); } @@ -2562,7 +2562,7 @@ void SelectionDAGLowering::visitSelect(User &I) { void SelectionDAGLowering::visitTrunc(User &I) { // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::TRUNCATE, DestVT, N)); } @@ -2570,7 +2570,7 @@ void SelectionDAGLowering::visitTrunc(User &I) { void SelectionDAGLowering::visitZExt(User &I) { // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). // ZExt also can't be a cast to bool for same reason. So, nothing much to do - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestVT, N)); } @@ -2578,49 +2578,49 @@ void SelectionDAGLowering::visitZExt(User &I) { void SelectionDAGLowering::visitSExt(User &I) { // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). // SExt also can't be a cast to bool for same reason. 
So, nothing much to do - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestVT, N)); } void SelectionDAGLowering::visitFPTrunc(User &I) { // FPTrunc is never a no-op cast, no need to check - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_ROUND, DestVT, N, DAG.getIntPtrConstant(0))); } void SelectionDAGLowering::visitFPExt(User &I){ // FPTrunc is never a no-op cast, no need to check - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestVT, N)); } void SelectionDAGLowering::visitFPToUI(User &I) { // FPToUI is never a no-op cast, no need to check - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestVT, N)); } void SelectionDAGLowering::visitFPToSI(User &I) { // FPToSI is never a no-op cast, no need to check - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestVT, N)); } void SelectionDAGLowering::visitUIToFP(User &I) { // UIToFP is never a no-op cast, no need to check - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestVT, N)); } void SelectionDAGLowering::visitSIToFP(User &I){ // UIToFP is never a no-op cast, no need to check - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestVT, N)); } @@ -2628,10 +2628,10 @@ void SelectionDAGLowering::visitSIToFP(User &I){ void SelectionDAGLowering::visitPtrToInt(User &I) { // What to do depends on the size of the integer and the size of the pointer. // We can either truncate, zero extend, or no-op, accordingly. - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT SrcVT = N.getValueType(); MVT DestVT = TLI.getValueType(I.getType()); - SDOperand Result; + SDValue Result; if (DestVT.bitsLT(SrcVT)) Result = DAG.getNode(ISD::TRUNCATE, DestVT, N); else @@ -2643,7 +2643,7 @@ void SelectionDAGLowering::visitPtrToInt(User &I) { void SelectionDAGLowering::visitIntToPtr(User &I) { // What to do depends on the size of the integer and the size of the pointer. // We can either truncate, zero extend, or no-op, accordingly. 
- SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT SrcVT = N.getValueType(); MVT DestVT = TLI.getValueType(I.getType()); if (DestVT.bitsLT(SrcVT)) @@ -2654,7 +2654,7 @@ void SelectionDAGLowering::visitIntToPtr(User &I) { } void SelectionDAGLowering::visitBitCast(User &I) { - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); MVT DestVT = TLI.getValueType(I.getType()); // BitCast assures us that source and destination are the same size so this @@ -2666,9 +2666,9 @@ void SelectionDAGLowering::visitBitCast(User &I) { } void SelectionDAGLowering::visitInsertElement(User &I) { - SDOperand InVec = getValue(I.getOperand(0)); - SDOperand InVal = getValue(I.getOperand(1)); - SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), + SDValue InVec = getValue(I.getOperand(0)); + SDValue InVal = getValue(I.getOperand(1)); + SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), getValue(I.getOperand(2))); setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, @@ -2677,17 +2677,17 @@ void SelectionDAGLowering::visitInsertElement(User &I) { } void SelectionDAGLowering::visitExtractElement(User &I) { - SDOperand InVec = getValue(I.getOperand(0)); - SDOperand InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), + SDValue InVec = getValue(I.getOperand(0)); + SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, TLI.getPointerTy(), getValue(I.getOperand(1))); setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, TLI.getValueType(I.getType()), InVec, InIdx)); } void SelectionDAGLowering::visitShuffleVector(User &I) { - SDOperand V1 = getValue(I.getOperand(0)); - SDOperand V2 = getValue(I.getOperand(1)); - SDOperand Mask = getValue(I.getOperand(2)); + SDValue V1 = getValue(I.getOperand(0)); + SDValue V2 = getValue(I.getOperand(1)); + SDValue Mask = getValue(I.getOperand(2)); setValue(&I, DAG.getNode(ISD::VECTOR_SHUFFLE, TLI.getValueType(I.getType()), @@ -2712,23 +2712,23 @@ void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) { unsigned NumAggValues = AggValueVTs.size(); unsigned NumValValues = ValValueVTs.size(); - SmallVector<SDOperand, 4> Values(NumAggValues); + SmallVector<SDValue, 4> Values(NumAggValues); - SDOperand Agg = getValue(Op0); - SDOperand Val = getValue(Op1); + SDValue Agg = getValue(Op0); + SDValue Val = getValue(Op1); unsigned i = 0; // Copy the beginning value(s) from the original aggregate. for (; i != LinearIndex; ++i) Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) : - SDOperand(Agg.Val, Agg.ResNo + i); + SDValue(Agg.Val, Agg.ResNo + i); // Copy values from the inserted value(s). for (; i != LinearIndex + NumValValues; ++i) Values[i] = FromUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) : - SDOperand(Val.Val, Val.ResNo + i - LinearIndex); + SDValue(Val.Val, Val.ResNo + i - LinearIndex); // Copy remaining value(s) from the original aggregate. for (; i != NumAggValues; ++i) Values[i] = IntoUndef ? DAG.getNode(ISD::UNDEF, AggValueVTs[i]) : - SDOperand(Agg.Val, Agg.ResNo + i); + SDValue(Agg.Val, Agg.ResNo + i); setValue(&I, DAG.getMergeValues(DAG.getVTList(&AggValueVTs[0], NumAggValues), &Values[0], NumAggValues)); @@ -2747,14 +2747,14 @@ void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) { ComputeValueVTs(TLI, ValTy, ValValueVTs); unsigned NumValValues = ValValueVTs.size(); - SmallVector<SDOperand, 4> Values(NumValValues); + SmallVector<SDValue, 4> Values(NumValValues); - SDOperand Agg = getValue(Op0); + SDValue Agg = getValue(Op0); // Copy out the selected value(s). 
for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i) Values[i - LinearIndex] = OutOfUndef ? DAG.getNode(ISD::UNDEF, Agg.Val->getValueType(Agg.ResNo + i)) : - SDOperand(Agg.Val, Agg.ResNo + i); + SDValue(Agg.Val, Agg.ResNo + i); setValue(&I, DAG.getMergeValues(DAG.getVTList(&ValValueVTs[0], NumValValues), &Values[0], NumValValues)); @@ -2762,7 +2762,7 @@ void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) { void SelectionDAGLowering::visitGetElementPtr(User &I) { - SDOperand N = getValue(I.getOperand(0)); + SDValue N = getValue(I.getOperand(0)); const Type *Ty = I.getOperand(0)->getType(); for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end(); @@ -2792,7 +2792,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) { // N = N + Idx * ElementSize; uint64_t ElementSize = TD->getABITypeSize(Ty); - SDOperand IdxN = getValue(Idx); + SDValue IdxN = getValue(Idx); // If the index is smaller or larger than intptr_t, truncate or extend // it. @@ -2811,7 +2811,7 @@ void SelectionDAGLowering::visitGetElementPtr(User &I) { continue; } - SDOperand Scale = DAG.getIntPtrConstant(ElementSize); + SDValue Scale = DAG.getIntPtrConstant(ElementSize); IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale); N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN); } @@ -2831,7 +2831,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) { std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), I.getAlignment()); - SDOperand AllocSize = getValue(I.getArraySize()); + SDValue AllocSize = getValue(I.getArraySize()); MVT IntPtr = TLI.getPointerTy(); if (IntPtr.bitsLT(AllocSize.getValueType())) AllocSize = DAG.getNode(ISD::TRUNCATE, IntPtr, AllocSize); @@ -2857,10 +2857,10 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) { AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize, DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1))); - SDOperand Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) }; + SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) }; const MVT *VTs = DAG.getNodeValueTypes(AllocSize.getValueType(), MVT::Other); - SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3); + SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, VTs, 2, Ops, 3); setValue(&I, DSA); DAG.setRoot(DSA.getValue(1)); @@ -2871,7 +2871,7 @@ void SelectionDAGLowering::visitAlloca(AllocaInst &I) { void SelectionDAGLowering::visitLoad(LoadInst &I) { const Value *SV = I.getOperand(0); - SDOperand Ptr = getValue(SV); + SDValue Ptr = getValue(SV); const Type *Ty = I.getType(); bool isVolatile = I.isVolatile(); @@ -2884,7 +2884,7 @@ void SelectionDAGLowering::visitLoad(LoadInst &I) { if (NumValues == 0) return; - SDOperand Root; + SDValue Root; bool ConstantMemory = false; if (I.isVolatile()) // Serialize volatile loads with other side effects. 
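The visitAlloca hunk above rounds the dynamic allocation size up to the stack alignment before emitting DYNAMIC_STACKALLOC. A minimal standalone sketch of that round-up arithmetic (plain C++ rather than the SelectionDAG node form; the alignment value 16 is only an illustrative stand-in for the target's stack alignment):

#include <cassert>
#include <cstdint>

// Round Size up to a multiple of Align (Align must be a power of two).
// This is the ADD-then-AND sequence the visitAlloca hunk feeds into
// DYNAMIC_STACKALLOC, written as plain integer arithmetic.
static uint64_t roundUpToAlignment(uint64_t Size, uint64_t Align) {
  return (Size + Align - 1) & ~(Align - 1);
}

int main() {
  const uint64_t StackAlign = 16;  // illustrative value only
  assert(roundUpToAlignment(0,  StackAlign) == 0);
  assert(roundUpToAlignment(1,  StackAlign) == 16);
  assert(roundUpToAlignment(16, StackAlign) == 16);
  assert(roundUpToAlignment(17, StackAlign) == 32);
  return 0;
}

The single AND with ~(Align-1) is valid only because stack alignments are powers of two, which is why the DAG form above can use one mask constant.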
@@ -2898,11 +2898,11 @@ void SelectionDAGLowering::visitLoad(LoadInst &I) { Root = DAG.getRoot(); } - SmallVector<SDOperand, 4> Values(NumValues); - SmallVector<SDOperand, 4> Chains(NumValues); + SmallVector<SDValue, 4> Values(NumValues); + SmallVector<SDValue, 4> Chains(NumValues); MVT PtrVT = Ptr.getValueType(); for (unsigned i = 0; i != NumValues; ++i) { - SDOperand L = DAG.getLoad(ValueVTs[i], Root, + SDValue L = DAG.getLoad(ValueVTs[i], Root, DAG.getNode(ISD::ADD, PtrVT, Ptr, DAG.getConstant(Offsets[i], PtrVT)), SV, Offsets[i], @@ -2912,7 +2912,7 @@ void SelectionDAGLowering::visitLoad(LoadInst &I) { } if (!ConstantMemory) { - SDOperand Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, + SDValue Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Chains[0], NumValues); if (isVolatile) DAG.setRoot(Chain); @@ -2927,9 +2927,9 @@ void SelectionDAGLowering::visitLoad(LoadInst &I) { void SelectionDAGLowering::visitStore(StoreInst &I) { Value *SrcV = I.getOperand(0); - SDOperand Src = getValue(SrcV); + SDValue Src = getValue(SrcV); Value *PtrV = I.getOperand(1); - SDOperand Ptr = getValue(PtrV); + SDValue Ptr = getValue(PtrV); SmallVector<MVT, 4> ValueVTs; SmallVector<uint64_t, 4> Offsets; @@ -2938,13 +2938,13 @@ void SelectionDAGLowering::visitStore(StoreInst &I) { if (NumValues == 0) return; - SDOperand Root = getRoot(); - SmallVector<SDOperand, 4> Chains(NumValues); + SDValue Root = getRoot(); + SmallVector<SDValue, 4> Chains(NumValues); MVT PtrVT = Ptr.getValueType(); bool isVolatile = I.isVolatile(); unsigned Alignment = I.getAlignment(); for (unsigned i = 0; i != NumValues; ++i) - Chains[i] = DAG.getStore(Root, SDOperand(Src.Val, Src.ResNo + i), + Chains[i] = DAG.getStore(Root, SDValue(Src.Val, Src.ResNo + i), DAG.getNode(ISD::ADD, PtrVT, Ptr, DAG.getConstant(Offsets[i], PtrVT)), PtrV, Offsets[i], @@ -2961,7 +2961,7 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, bool OnlyLoad = HasChain && I.onlyReadsMemory(); // Build the operand list. - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; if (HasChain) { // If this intrinsic has side-effects, chainify it. if (OnlyLoad) { // We don't need to serialize loads against other loads. @@ -2976,7 +2976,7 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, // Add all operands of the call to the operand list. for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) { - SDOperand Op = getValue(I.getOperand(i)); + SDValue Op = getValue(I.getOperand(i)); assert(TLI.isTypeLegal(Op.getValueType()) && "Intrinsic uses a non-legal type?"); Ops.push_back(Op); @@ -3002,7 +3002,7 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, const MVT *VTList = DAG.getNodeValueTypes(VTs); // Create the node. 
- SDOperand Result; + SDValue Result; if (!HasChain) Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VTList, VTs.size(), &Ops[0], Ops.size()); @@ -3014,7 +3014,7 @@ void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I, &Ops[0], Ops.size()); if (HasChain) { - SDOperand Chain = Result.getValue(Result.Val->getNumValues()-1); + SDValue Chain = Result.getValue(Result.Val->getNumValues()-1); if (OnlyLoad) PendingLoads.push_back(Chain); else @@ -3098,8 +3098,8 @@ static void addCatchInfo(CallInst &I, MachineModuleInfo *MMI, // Op is the associated NodeType for I const char * SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) { - SDOperand Root = getRoot(); - SDOperand L = DAG.getAtomic(Op, Root, + SDValue Root = getRoot(); + SDValue L = DAG.getAtomic(Op, Root, getValue(I.getOperand(1)), getValue(I.getOperand(2)), I.getOperand(1)); @@ -3137,9 +3137,9 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { break; case Intrinsic::memcpy_i32: case Intrinsic::memcpy_i64: { - SDOperand Op1 = getValue(I.getOperand(1)); - SDOperand Op2 = getValue(I.getOperand(2)); - SDOperand Op3 = getValue(I.getOperand(3)); + SDValue Op1 = getValue(I.getOperand(1)); + SDValue Op2 = getValue(I.getOperand(2)); + SDValue Op3 = getValue(I.getOperand(3)); unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); DAG.setRoot(DAG.getMemcpy(getRoot(), Op1, Op2, Op3, Align, false, I.getOperand(1), 0, I.getOperand(2), 0)); @@ -3147,9 +3147,9 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } case Intrinsic::memset_i32: case Intrinsic::memset_i64: { - SDOperand Op1 = getValue(I.getOperand(1)); - SDOperand Op2 = getValue(I.getOperand(2)); - SDOperand Op3 = getValue(I.getOperand(3)); + SDValue Op1 = getValue(I.getOperand(1)); + SDValue Op2 = getValue(I.getOperand(2)); + SDValue Op3 = getValue(I.getOperand(3)); unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); DAG.setRoot(DAG.getMemset(getRoot(), Op1, Op2, Op3, Align, I.getOperand(1), 0)); @@ -3157,9 +3157,9 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } case Intrinsic::memmove_i32: case Intrinsic::memmove_i64: { - SDOperand Op1 = getValue(I.getOperand(1)); - SDOperand Op2 = getValue(I.getOperand(2)); - SDOperand Op3 = getValue(I.getOperand(3)); + SDValue Op1 = getValue(I.getOperand(1)); + SDValue Op2 = getValue(I.getOperand(2)); + SDValue Op3 = getValue(I.getOperand(3)); unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue(); // If the source and destination are known to not be aliases, we can @@ -3250,9 +3250,9 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } // Insert the EXCEPTIONADDR instruction. SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other); - SDOperand Ops[1]; + SDValue Ops[1]; Ops[0] = DAG.getRoot(); - SDOperand Op = DAG.getNode(ISD::EXCEPTIONADDR, VTs, Ops, 1); + SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, VTs, Ops, 1); setValue(&I, Op); DAG.setRoot(Op.getValue(1)); return 0; @@ -3278,10 +3278,10 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { // Insert the EHSELECTION instruction. 
SDVTList VTs = DAG.getVTList(VT, MVT::Other); - SDOperand Ops[2]; + SDValue Ops[2]; Ops[0] = getValue(I.getOperand(1)); Ops[1] = getRoot(); - SDOperand Op = DAG.getNode(ISD::EHSELECTION, VTs, Ops, 2); + SDValue Op = DAG.getNode(ISD::EHSELECTION, VTs, Ops, 2); setValue(&I, Op); DAG.setRoot(Op.getValue(1)); } else { @@ -3338,7 +3338,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::eh_dwarf_cfa: { MVT VT = getValue(I.getOperand(1)).getValueType(); - SDOperand CfaArg; + SDValue CfaArg; if (VT.bitsGT(TLI.getPointerTy())) CfaArg = DAG.getNode(ISD::TRUNCATE, TLI.getPointerTy(), getValue(I.getOperand(1))); @@ -3346,7 +3346,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { CfaArg = DAG.getNode(ISD::SIGN_EXTEND, TLI.getPointerTy(), getValue(I.getOperand(1))); - SDOperand Offset = DAG.getNode(ISD::ADD, + SDValue Offset = DAG.getNode(ISD::ADD, TLI.getPointerTy(), DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, TLI.getPointerTy()), @@ -3389,13 +3389,13 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { getValue(I.getOperand(2)))); return 0; case Intrinsic::pcmarker: { - SDOperand Tmp = getValue(I.getOperand(1)); + SDValue Tmp = getValue(I.getOperand(1)); DAG.setRoot(DAG.getNode(ISD::PCMARKER, MVT::Other, getRoot(), Tmp)); return 0; } case Intrinsic::readcyclecounter: { - SDOperand Op = getRoot(); - SDOperand Tmp = DAG.getNode(ISD::READCYCLECOUNTER, + SDValue Op = getRoot(); + SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, DAG.getNodeValueTypes(MVT::i64, MVT::Other), 2, &Op, 1); setValue(&I, Tmp); @@ -3418,36 +3418,36 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { getValue(I.getOperand(1)))); return 0; case Intrinsic::cttz: { - SDOperand Arg = getValue(I.getOperand(1)); + SDValue Arg = getValue(I.getOperand(1)); MVT Ty = Arg.getValueType(); - SDOperand result = DAG.getNode(ISD::CTTZ, Ty, Arg); + SDValue result = DAG.getNode(ISD::CTTZ, Ty, Arg); setValue(&I, result); return 0; } case Intrinsic::ctlz: { - SDOperand Arg = getValue(I.getOperand(1)); + SDValue Arg = getValue(I.getOperand(1)); MVT Ty = Arg.getValueType(); - SDOperand result = DAG.getNode(ISD::CTLZ, Ty, Arg); + SDValue result = DAG.getNode(ISD::CTLZ, Ty, Arg); setValue(&I, result); return 0; } case Intrinsic::ctpop: { - SDOperand Arg = getValue(I.getOperand(1)); + SDValue Arg = getValue(I.getOperand(1)); MVT Ty = Arg.getValueType(); - SDOperand result = DAG.getNode(ISD::CTPOP, Ty, Arg); + SDValue result = DAG.getNode(ISD::CTPOP, Ty, Arg); setValue(&I, result); return 0; } case Intrinsic::stacksave: { - SDOperand Op = getRoot(); - SDOperand Tmp = DAG.getNode(ISD::STACKSAVE, + SDValue Op = getRoot(); + SDValue Tmp = DAG.getNode(ISD::STACKSAVE, DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, &Op, 1); setValue(&I, Tmp); DAG.setRoot(Tmp.getValue(1)); return 0; } case Intrinsic::stackrestore: { - SDOperand Tmp = getValue(I.getOperand(1)); + SDValue Tmp = getValue(I.getOperand(1)); DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, MVT::Other, getRoot(), Tmp)); return 0; } @@ -3458,7 +3458,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { case Intrinsic::init_trampoline: { const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts()); - SDOperand Ops[6]; + SDValue Ops[6]; Ops[0] = getRoot(); Ops[1] = getValue(I.getOperand(1)); Ops[2] = getValue(I.getOperand(2)); @@ -3466,7 +3466,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { Ops[4] = 
DAG.getSrcValue(I.getOperand(1)); Ops[5] = DAG.getSrcValue(F); - SDOperand Tmp = DAG.getNode(ISD::TRAMPOLINE, + SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, DAG.getNodeValueTypes(TLI.getPointerTy(), MVT::Other), 2, Ops, 6); @@ -3501,7 +3501,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { return 0; } case Intrinsic::prefetch: { - SDOperand Ops[4]; + SDValue Ops[4]; Ops[0] = getRoot(); Ops[1] = getValue(I.getOperand(1)); Ops[2] = getValue(I.getOperand(2)); @@ -3511,7 +3511,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } case Intrinsic::memory_barrier: { - SDOperand Ops[6]; + SDValue Ops[6]; Ops[0] = getRoot(); for (int x = 1; x < 6; ++x) Ops[x] = getValue(I.getOperand(x)); @@ -3520,8 +3520,8 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { return 0; } case Intrinsic::atomic_cmp_swap: { - SDOperand Root = getRoot(); - SDOperand L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, Root, + SDValue Root = getRoot(); + SDValue L = DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, Root, getValue(I.getOperand(1)), getValue(I.getOperand(2)), getValue(I.getOperand(3)), @@ -3556,7 +3556,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) { } -void SelectionDAGLowering::LowerCallTo(CallSite CS, SDOperand Callee, +void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee, bool IsTailCall, MachineBasicBlock *LandingPad) { const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType()); @@ -3569,7 +3569,7 @@ void SelectionDAGLowering::LowerCallTo(CallSite CS, SDOperand Callee, Args.reserve(CS.arg_size()); for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) { - SDOperand ArgNode = getValue(*i); + SDValue ArgNode = getValue(*i); Entry.Node = ArgNode; Entry.Ty = (*i)->getType(); unsigned attrInd = i - CS.arg_begin() + 1; @@ -3593,7 +3593,7 @@ void SelectionDAGLowering::LowerCallTo(CallSite CS, SDOperand Callee, DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getControlRoot(), BeginLabel)); } - std::pair<SDOperand,SDOperand> Result = + std::pair<SDValue,SDValue> Result = TLI.LowerCallTo(getRoot(), CS.getType(), CS.paramHasAttr(0, ParamAttr::SExt), CS.paramHasAttr(0, ParamAttr::ZExt), @@ -3638,8 +3638,8 @@ void SelectionDAGLowering::visitCall(CallInst &I) { I.getOperand(1)->getType()->isFloatingPoint() && I.getType() == I.getOperand(1)->getType() && I.getType() == I.getOperand(2)->getType()) { - SDOperand LHS = getValue(I.getOperand(1)); - SDOperand RHS = getValue(I.getOperand(2)); + SDValue LHS = getValue(I.getOperand(1)); + SDValue RHS = getValue(I.getOperand(2)); setValue(&I, DAG.getNode(ISD::FCOPYSIGN, LHS.getValueType(), LHS, RHS)); return; @@ -3651,7 +3651,7 @@ void SelectionDAGLowering::visitCall(CallInst &I) { if (I.getNumOperands() == 2 && // Basic sanity checks. I.getOperand(1)->getType()->isFloatingPoint() && I.getType() == I.getOperand(1)->getType()) { - SDOperand Tmp = getValue(I.getOperand(1)); + SDValue Tmp = getValue(I.getOperand(1)); setValue(&I, DAG.getNode(ISD::FABS, Tmp.getValueType(), Tmp)); return; } @@ -3662,7 +3662,7 @@ void SelectionDAGLowering::visitCall(CallInst &I) { if (I.getNumOperands() == 2 && // Basic sanity checks. 
I.getOperand(1)->getType()->isFloatingPoint() && I.getType() == I.getOperand(1)->getType()) { - SDOperand Tmp = getValue(I.getOperand(1)); + SDValue Tmp = getValue(I.getOperand(1)); setValue(&I, DAG.getNode(ISD::FSIN, Tmp.getValueType(), Tmp)); return; } @@ -3673,7 +3673,7 @@ void SelectionDAGLowering::visitCall(CallInst &I) { if (I.getNumOperands() == 2 && // Basic sanity checks. I.getOperand(1)->getType()->isFloatingPoint() && I.getType() == I.getOperand(1)->getType()) { - SDOperand Tmp = getValue(I.getOperand(1)); + SDValue Tmp = getValue(I.getOperand(1)); setValue(&I, DAG.getNode(ISD::FCOS, Tmp.getValueType(), Tmp)); return; } @@ -3684,7 +3684,7 @@ void SelectionDAGLowering::visitCall(CallInst &I) { return; } - SDOperand Callee; + SDValue Callee; if (!RenameFn) Callee = getValue(I.getOperand(0)); else @@ -3698,12 +3698,12 @@ void SelectionDAGLowering::visitCall(CallInst &I) { /// this value and returns the result as a ValueVT value. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. -SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, - SDOperand &Chain, - SDOperand *Flag) const { +SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, + SDValue &Chain, + SDValue *Flag) const { // Assemble the legal parts into the final values. - SmallVector<SDOperand, 4> Values(ValueVTs.size()); - SmallVector<SDOperand, 8> Parts; + SmallVector<SDValue, 4> Values(ValueVTs.size()); + SmallVector<SDValue, 8> Parts; for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) { // Copy the legal parts from the registers. MVT ValueVT = ValueVTs[Value]; @@ -3712,7 +3712,7 @@ SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, Parts.resize(NumRegs); for (unsigned i = 0; i != NumRegs; ++i) { - SDOperand P; + SDValue P; if (Flag == 0) P = DAG.getCopyFromReg(Chain, Regs[Part+i], RegisterVT); else { @@ -3779,11 +3779,11 @@ SDOperand RegsForValue::getCopyFromRegs(SelectionDAG &DAG, /// specified value into the registers specified by this object. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. -void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, - SDOperand &Chain, SDOperand *Flag) const { +void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, + SDValue &Chain, SDValue *Flag) const { // Get the list of the values's legal parts. unsigned NumRegs = Regs.size(); - SmallVector<SDOperand, 8> Parts(NumRegs); + SmallVector<SDValue, 8> Parts(NumRegs); for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) { MVT ValueVT = ValueVTs[Value]; unsigned NumParts = TLI->getNumRegisters(ValueVT); @@ -3795,9 +3795,9 @@ void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, } // Copy the parts into the registers. - SmallVector<SDOperand, 8> Chains(NumRegs); + SmallVector<SDValue, 8> Chains(NumRegs); for (unsigned i = 0; i != NumRegs; ++i) { - SDOperand Part; + SDValue Part; if (Flag == 0) Part = DAG.getCopyToReg(Chain, Regs[i], Parts[i]); else { @@ -3827,7 +3827,7 @@ void RegsForValue::getCopyToRegs(SDOperand Val, SelectionDAG &DAG, /// operand list. This adds the code marker and includes the number of /// values added into it. 
void RegsForValue::AddInlineAsmOperands(unsigned Code, SelectionDAG &DAG, - std::vector<SDOperand> &Ops) const { + std::vector<SDValue> &Ops) const { MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy(); Ops.push_back(DAG.getTargetConstant(Code | (Regs.size() << 3), IntPtrTy)); for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) { @@ -3894,7 +3894,7 @@ struct SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo { /// CallOperand - If this is the result output operand or a clobber /// this is null, otherwise it is the incoming operand to the CallInst. /// This gets modified as the asm is processed. - SDOperand CallOperand; + SDValue CallOperand; /// AssignedRegs - If this is a register or register class operand, this /// contains the set of register corresponding to the operand. @@ -4115,8 +4115,8 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { /// ConstraintOperands - Information about all of the constraints. std::vector<SDISelAsmOperandInfo> ConstraintOperands; - SDOperand Chain = getRoot(); - SDOperand Flag; + SDValue Chain = getRoot(); + SDValue Flag; std::set<unsigned> OutputRegs, InputRegs; @@ -4247,7 +4247,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty); MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align); - SDOperand StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy()); + SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy()); Chain = DAG.getStore(Chain, OpInfo.CallOperand, StackSlot, NULL, 0); OpInfo.CallOperand = StackSlot; } @@ -4278,8 +4278,8 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { } // AsmNodeOperands - The operands for the ISD::INLINEASM node. - std::vector<SDOperand> AsmNodeOperands; - AsmNodeOperands.push_back(SDOperand()); // reserve space for input chain + std::vector<SDValue> AsmNodeOperands; + AsmNodeOperands.push_back(SDValue()); // reserve space for input chain AsmNodeOperands.push_back( DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other)); @@ -4338,7 +4338,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { break; } case InlineAsm::isInput: { - SDOperand InOperandVal = OpInfo.CallOperand; + SDValue InOperandVal = OpInfo.CallOperand; if (isdigit(OpInfo.ConstraintCode[0])) { // Matching constraint? // If this is required to match an output register we have already set, @@ -4392,7 +4392,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { assert(!OpInfo.isIndirect && "Don't know how to handle indirect other inputs yet!"); - std::vector<SDOperand> Ops; + std::vector<SDValue> Ops; TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0], Ops, DAG); if (Ops.empty()) { @@ -4459,7 +4459,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { // If this asm returns a register value, copy the result from that register // and set it as the value of the call. if (!RetValRegs.Regs.empty()) { - SDOperand Val = RetValRegs.getCopyFromRegs(DAG, Chain, &Flag); + SDValue Val = RetValRegs.getCopyFromRegs(DAG, Chain, &Flag); // If any of the results of the inline asm is a vector, it may have the // wrong width/num elts. 
This can happen for register classes that can @@ -4481,19 +4481,19 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { setValue(CS.getInstruction(), Val); } - std::vector<std::pair<SDOperand, Value*> > StoresToEmit; + std::vector<std::pair<SDValue, Value*> > StoresToEmit; // Process indirect outputs, first output all of the flagged copies out of // physregs. for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) { RegsForValue &OutRegs = IndirectStoresToEmit[i].first; Value *Ptr = IndirectStoresToEmit[i].second; - SDOperand OutVal = OutRegs.getCopyFromRegs(DAG, Chain, &Flag); + SDValue OutVal = OutRegs.getCopyFromRegs(DAG, Chain, &Flag); StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); } // Emit the non-flagged stores from the physregs. - SmallVector<SDOperand, 8> OutChains; + SmallVector<SDValue, 8> OutChains; for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i) OutChains.push_back(DAG.getStore(Chain, StoresToEmit[i].first, getValue(StoresToEmit[i].second), @@ -4506,7 +4506,7 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) { void SelectionDAGLowering::visitMalloc(MallocInst &I) { - SDOperand Src = getValue(I.getOperand(0)); + SDValue Src = getValue(I.getOperand(0)); MVT IntPtr = TLI.getPointerTy(); @@ -4526,7 +4526,7 @@ void SelectionDAGLowering::visitMalloc(MallocInst &I) { Entry.Ty = TLI.getTargetData()->getIntPtrType(); Args.push_back(Entry); - std::pair<SDOperand,SDOperand> Result = + std::pair<SDValue,SDValue> Result = TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, CallingConv::C, true, DAG.getExternalSymbol("malloc", IntPtr), Args, DAG); setValue(&I, Result.first); // Pointers always fit in registers @@ -4540,7 +4540,7 @@ void SelectionDAGLowering::visitFree(FreeInst &I) { Entry.Ty = TLI.getTargetData()->getIntPtrType(); Args.push_back(Entry); MVT IntPtr = TLI.getPointerTy(); - std::pair<SDOperand,SDOperand> Result = + std::pair<SDValue,SDValue> Result = TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, CallingConv::C, true, DAG.getExternalSymbol("free", IntPtr), Args, DAG); @@ -4568,7 +4568,7 @@ void SelectionDAGLowering::visitVAStart(CallInst &I) { } void SelectionDAGLowering::visitVAArg(VAArgInst &I) { - SDOperand V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(), + SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0))); setValue(&I, V); @@ -4594,9 +4594,9 @@ void SelectionDAGLowering::visitVACopy(CallInst &I) { /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be /// integrated into SDISel. void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, - SmallVectorImpl<SDOperand> &ArgValues) { + SmallVectorImpl<SDValue> &ArgValues) { // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node. - SmallVector<SDOperand, 3+16> Ops; + SmallVector<SDValue, 3+16> Ops; Ops.push_back(DAG.getRoot()); Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy())); Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy())); @@ -4666,7 +4666,7 @@ void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but // allows exposing the loads that may be part of the argument access to the // first DAGCombiner pass. - SDOperand TmpRes = LowerOperation(SDOperand(Result, 0), DAG); + SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG); // The number of results should match up, except that the lowered one may have // an extra flag result. 
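Several hunks around here (RegsForValue::getCopyFromRegs/getCopyToRegs above, and the Parts loops in LowerArguments/LowerCallTo below) split one IR-level value into register-sized parts and later reassemble it. A minimal sketch of that idea for the simplest case, a 64-bit integer carried in two 32-bit parts, low part first (standalone C++; the real code works over SDValues and also handles vectors, floats, and extension/assertion nodes):

#include <cassert>
#include <cstdint>
#include <vector>

// Split a 64-bit value into 32-bit register-sized parts (low part first)
// and reassemble it. This is only the scalar-integer core of what the
// Parts vectors in the surrounding hunks carry.
static std::vector<uint32_t> splitIntoParts(uint64_t Val) {
  return { static_cast<uint32_t>(Val), static_cast<uint32_t>(Val >> 32) };
}

static uint64_t assembleFromParts(const std::vector<uint32_t> &Parts) {
  return static_cast<uint64_t>(Parts[0]) |
         (static_cast<uint64_t>(Parts[1]) << 32);
}

int main() {
  const uint64_t V = 0x0123456789abcdefULL;
  assert(assembleFromParts(splitIntoParts(V)) == V);
  return 0;
}

Keeping all parts in one flat vector is what lets the loops above walk several aggregate members with a single running part index.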
@@ -4684,7 +4684,7 @@ void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, Result = TmpRes.Val; unsigned NumArgRegs = Result->getNumValues() - 1; - DAG.setRoot(SDOperand(Result, NumArgRegs)); + DAG.setRoot(SDValue(Result, NumArgRegs)); // Set up the return result vector. unsigned i = 0; @@ -4699,9 +4699,9 @@ void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, MVT PartVT = getRegisterType(VT); unsigned NumParts = getNumRegisters(VT); - SmallVector<SDOperand, 4> Parts(NumParts); + SmallVector<SDValue, 4> Parts(NumParts); for (unsigned j = 0; j != NumParts; ++j) - Parts[j] = SDOperand(Result, i++); + Parts[j] = SDValue(Result, i++); ISD::NodeType AssertOp = ISD::DELETED_NODE; if (F.paramHasAttr(Idx, ParamAttr::SExt)) @@ -4721,13 +4721,13 @@ void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, /// implementation, which just inserts an ISD::CALL node, which is later custom /// lowered by the target to something concrete. FIXME: When all targets are /// migrated to using ISD::CALL, this hook should be integrated into SDISel. -std::pair<SDOperand, SDOperand> -TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, +std::pair<SDValue, SDValue> +TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, bool isVarArg, unsigned CallingConv, bool isTailCall, - SDOperand Callee, + SDValue Callee, ArgListTy &Args, SelectionDAG &DAG) { - SmallVector<SDOperand, 32> Ops; + SmallVector<SDValue, 32> Ops; Ops.push_back(Chain); // Op#0 - Chain Ops.push_back(DAG.getConstant(CallingConv, getPointerTy())); // Op#1 - CC Ops.push_back(DAG.getConstant(isVarArg, getPointerTy())); // Op#2 - VarArg @@ -4742,7 +4742,7 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, Value != NumValues; ++Value) { MVT VT = ValueVTs[Value]; const Type *ArgTy = VT.getTypeForMVT(); - SDOperand Op = SDOperand(Args[i].Node.Val, Args[i].Node.ResNo + Value); + SDValue Op = SDValue(Args[i].Node.Val, Args[i].Node.ResNo + Value); ISD::ArgFlagsTy Flags; unsigned OriginalAlignment = getTargetData()->getABITypeAlignment(ArgTy); @@ -4774,7 +4774,7 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, MVT PartVT = getRegisterType(VT); unsigned NumParts = getNumRegisters(VT); - SmallVector<SDOperand, 4> Parts(NumParts); + SmallVector<SDValue, 4> Parts(NumParts); ISD::NodeType ExtendKind = ISD::ANY_EXTEND; if (Args[i].isSExt) @@ -4816,7 +4816,7 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, LoweredRetTys.push_back(MVT::Other); // Always has a chain. // Create the CALL node. 
- SDOperand Res = DAG.getNode(ISD::CALL, + SDValue Res = DAG.getNode(ISD::CALL, DAG.getVTList(&LoweredRetTys[0], LoweredRetTys.size()), &Ops[0], Ops.size()); @@ -4831,17 +4831,17 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, else if (RetZExt) AssertOp = ISD::AssertZext; - SmallVector<SDOperand, 4> ReturnValues; + SmallVector<SDValue, 4> ReturnValues; unsigned RegNo = 0; for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { MVT VT = RetTys[I]; MVT RegisterVT = getRegisterType(VT); unsigned NumRegs = getNumRegisters(VT); unsigned RegNoEnd = NumRegs + RegNo; - SmallVector<SDOperand, 4> Results; + SmallVector<SDValue, 4> Results; for (; RegNo != RegNoEnd; ++RegNo) Results.push_back(Res.getValue(RegNo)); - SDOperand ReturnValue = + SDValue ReturnValue = getCopyFromParts(DAG, &Results[0], NumRegs, RegisterVT, VT, AssertOp); ReturnValues.push_back(ReturnValue); @@ -4853,10 +4853,10 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, return std::make_pair(Res, Chain); } -SDOperand TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { +SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { assert(0 && "LowerOperation not implemented for this target!"); abort(); - return SDOperand(); + return SDValue(); } @@ -4912,14 +4912,14 @@ bool SelectionDAGISel::runOnFunction(Function &Fn) { } void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) { - SDOperand Op = getValue(V); + SDValue Op = getValue(V); assert((Op.getOpcode() != ISD::CopyFromReg || cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) && "Copy from a reg to the same reg!"); assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg"); RegsForValue RFV(TLI, Reg, V->getType()); - SDOperand Chain = DAG.getEntryNode(); + SDValue Chain = DAG.getEntryNode(); RFV.getCopyToRegs(Op, DAG, Chain, 0); PendingExports.push_back(Chain); } @@ -4929,8 +4929,8 @@ LowerArguments(BasicBlock *LLVMBB, SelectionDAGLowering &SDL) { // If this is the entry block, emit arguments. Function &F = *LLVMBB->getParent(); FunctionLoweringInfo &FuncInfo = SDL.FuncInfo; - SDOperand OldRoot = SDL.DAG.getRoot(); - SmallVector<SDOperand, 16> Args; + SDValue OldRoot = SDL.DAG.getRoot(); + SmallVector<SDValue, 16> Args; TLI.LowerArguments(F, SDL.DAG, Args); unsigned a = 0; @@ -4972,7 +4972,7 @@ static void copyCatchInfo(BasicBlock *SrcBB, BasicBlock *DestBB, /// IsFixedFrameObjectWithPosOffset - Check if object is a fixed frame object and /// whether object offset >= 0. static bool -IsFixedFrameObjectWithPosOffset(MachineFrameInfo * MFI, SDOperand Op) { +IsFixedFrameObjectWithPosOffset(MachineFrameInfo * MFI, SDValue Op) { if (!isa<FrameIndexSDNode>(Op)) return false; FrameIndexSDNode * FrameIdxNode = dyn_cast<FrameIndexSDNode>(Op); @@ -4986,7 +4986,7 @@ IsFixedFrameObjectWithPosOffset(MachineFrameInfo * MFI, SDOperand Op) { /// call. Currently the implementation of this call is very conservative and /// assumes all arguments sourcing from FORMAL_ARGUMENTS or a CopyFromReg with /// virtual registers would be overwritten by direct lowering. 
-static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op, +static bool IsPossiblyOverwrittenArgumentOfTailCall(SDValue Op, MachineFrameInfo * MFI) { RegisterSDNode * OpReg = NULL; if (Op.getOpcode() == ISD::FORMAL_ARGUMENTS || @@ -5008,7 +5008,7 @@ static bool IsPossiblyOverwrittenArgumentOfTailCall(SDOperand Op, static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG, TargetLowering& TLI) { SDNode * Ret = NULL; - SDOperand Terminator = DAG.getRoot(); + SDValue Terminator = DAG.getRoot(); // Find RET node. if (Terminator.getOpcode() == ISD::RET) { @@ -5020,8 +5020,8 @@ static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG, BI = DAG.allnodes_end(); BI != BE; ) { --BI; if (BI->getOpcode() == ISD::CALL) { - SDOperand OpRet(Ret, 0); - SDOperand OpCall(BI, 0); + SDValue OpRet(Ret, 0); + SDValue OpCall(BI, 0); bool isMarkedTailCall = cast<ConstantSDNode>(OpCall.getOperand(3))->getValue() != 0; // If CALL node has tail call attribute set to true and the call is not @@ -5032,7 +5032,7 @@ static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG, if (Ret==NULL || !TLI.IsEligibleForTailCallOptimization(OpCall, OpRet, DAG)) { // Not eligible. Mark CALL node as non tail call. - SmallVector<SDOperand, 32> Ops; + SmallVector<SDValue, 32> Ops; unsigned idx=0; for(SDNode::op_iterator I =OpCall.Val->op_begin(), E = OpCall.Val->op_end(); I != E; I++, idx++) { @@ -5045,12 +5045,12 @@ static void CheckDAGForTailCallsAndFixThem(SelectionDAG &DAG, } else { // Look for tail call clobbered arguments. Emit a series of // copyto/copyfrom virtual register nodes to protect them. - SmallVector<SDOperand, 32> Ops; - SDOperand Chain = OpCall.getOperand(0), InFlag; + SmallVector<SDValue, 32> Ops; + SDValue Chain = OpCall.getOperand(0), InFlag; unsigned idx=0; for(SDNode::op_iterator I = OpCall.Val->op_begin(), E = OpCall.Val->op_end(); I != E; I++, idx++) { - SDOperand Arg = *I; + SDValue Arg = *I; if (idx > 4 && (idx % 2)) { bool isByVal = cast<ARG_FLAGSSDNode>(OpCall.getOperand(idx+1))-> getArgFlags().isByVal(); @@ -5271,7 +5271,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo(SelectionDAG &DAG) { continue; // Ignore non-scalar or non-integer values. - SDOperand Src = N->getOperand(2); + SDValue Src = N->getOperand(2); MVT SrcVT = Src.getValueType(); if (!SrcVT.isInteger() || SrcVT.isVector()) continue; @@ -5692,7 +5692,7 @@ HazardRecognizer *SelectionDAGISel::CreateTargetHazardRecognizer() { /// the dag combiner simplified the 255, we still want to match. RHS is the /// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value /// specified in the .td file (e.g. 255). -bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS, +bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS, int64_t DesiredMaskS) const { const APInt &ActualMask = RHS->getAPIntValue(); const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS); @@ -5721,7 +5721,7 @@ bool SelectionDAGISel::CheckAndMask(SDOperand LHS, ConstantSDNode *RHS, /// the dag combiner simplified the 255, we still want to match. RHS is the /// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value /// specified in the .td file (e.g. 255). 
-bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS, +bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS, int64_t DesiredMaskS) const { const APInt &ActualMask = RHS->getAPIntValue(); const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS); @@ -5755,8 +5755,8 @@ bool SelectionDAGISel::CheckOrMask(SDOperand LHS, ConstantSDNode *RHS, /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated /// by tblgen. Others should not call it. void SelectionDAGISel:: -SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) { - std::vector<SDOperand> InOps; +SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SelectionDAG &DAG) { + std::vector<SDValue> InOps; std::swap(InOps, Ops); Ops.push_back(InOps[0]); // input chain. @@ -5775,7 +5775,7 @@ SelectInlineAsmMemoryOperands(std::vector<SDOperand> &Ops, SelectionDAG &DAG) { } else { assert((Flags >> 3) == 1 && "Memory operand with multiple values?"); // Otherwise, this is a memory operand. Ask the target to select it. - std::vector<SDOperand> SelOps; + std::vector<SDValue> SelOps; if (SelectInlineAsmMemoryOperand(InOps[i+1], 'm', SelOps, DAG)) { cerr << "Could not match memory address. Inline asm failure!\n"; exit(1); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp index 4bcd916ff8..7deedfa1a0 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp @@ -80,7 +80,7 @@ namespace llvm { /// edge, override this method. template<typename EdgeIter> static std::string getEdgeAttributes(const void *Node, EdgeIter EI) { - SDOperand Op = EI.getNode()->getOperand(EI.getOperand()); + SDValue Op = EI.getNode()->getOperand(EI.getOperand()); MVT VT = Op.getValueType(); if (VT == MVT::Flag) return "color=red,style=bold"; diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 1fd9d953a8..b2f835991d 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -516,7 +516,7 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { } -MVT TargetLowering::getSetCCResultType(const SDOperand &) const { +MVT TargetLowering::getSetCCResultType(const SDValue &) const { return getValueType(TD->getIntPtrType()); } @@ -582,8 +582,8 @@ unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const { return TD->getCallFrameTypeAlignment(Ty); } -SDOperand TargetLowering::getPICJumpTableRelocBase(SDOperand Table, - SelectionDAG &DAG) const { +SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table, + SelectionDAG &DAG) const { if (usesGlobalOffsetTable()) return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy()); return Table; @@ -597,7 +597,7 @@ SDOperand TargetLowering::getPICJumpTableRelocBase(SDOperand Table, /// specified instruction is a constant integer. If so, check to see if there /// are any bits set in the constant that are not demanded. If so, shrink the /// constant and return true. 
-bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op, +bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op, const APInt &Demanded) { // FIXME: ISD::SELECT, ISD::SELECT_CC switch(Op.getOpcode()) { @@ -608,7 +608,7 @@ bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op, if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) if (C->getAPIntValue().intersects(~Demanded)) { MVT VT = Op.getValueType(); - SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), + SDValue New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), DAG.getConstant(Demanded & C->getAPIntValue(), VT)); @@ -626,7 +626,7 @@ bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op, /// analyze the expression and return a mask of KnownOne and KnownZero bits for /// the expression (used to simplify the caller). The KnownZero/One bits may /// only be accurate for those bits in the DemandedMask. -bool TargetLowering::SimplifyDemandedBits(SDOperand Op, +bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, APInt &KnownZero, APInt &KnownOne, @@ -780,7 +780,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known if ((KnownOne & KnownOne2) == KnownOne) { MVT VT = Op.getValueType(); - SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); + SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0), ANDC)); } @@ -795,7 +795,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, if (Expanded.isAllOnesValue()) { if (Expanded != C->getAPIntValue()) { MVT VT = Op.getValueType(); - SDOperand New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), + SDValue New = TLO.DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0), TLO.DAG.getConstant(Expanded, VT)); return TLO.CombineTo(Op, New); } @@ -848,7 +848,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, case ISD::SHL: if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { unsigned ShAmt = SA->getValue(); - SDOperand InOp = Op.getOperand(0); + SDValue InOp = Op.getOperand(0); // If the shift count is an invalid immediate, don't do anything. if (ShAmt >= BitWidth) @@ -868,7 +868,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, Opc = ISD::SRL; } - SDOperand NewSA = + SDValue NewSA = TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); MVT VT = Op.getValueType(); return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, @@ -890,7 +890,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, MVT VT = Op.getValueType(); unsigned ShAmt = SA->getValue(); unsigned VTSize = VT.getSizeInBits(); - SDOperand InOp = Op.getOperand(0); + SDValue InOp = Op.getOperand(0); // If the shift count is an invalid immediate, don't do anything. if (ShAmt >= BitWidth) @@ -910,7 +910,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, Opc = ISD::SHL; } - SDOperand NewSA = + SDValue NewSA = TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, VT, InOp.getOperand(0), NewSA)); @@ -1099,7 +1099,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, // If the input is only used by this truncate, see if we can shrink it based // on the known demanded bits. 
if (Op.getOperand(0).Val->hasOneUse()) { - SDOperand In = Op.getOperand(0); + SDValue In = Op.getOperand(0); unsigned InBitWidth = In.getValueSizeInBits(); switch (In.getOpcode()) { default: break; @@ -1115,7 +1115,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, if (ShAmt->getValue() < BitWidth && !(HighBits & NewMask)) { // None of the shifted in bits are needed. Add a truncate of the // shift input, then shift it. - SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, + SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, Op.getValueType(), In.getOperand(0)); return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL,Op.getValueType(), @@ -1152,10 +1152,10 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, isOperationLegal(ISD::FGETSIGN, Op.getValueType())) { // Make a FGETSIGN + SHL to move the sign bit into the appropriate // place. We expect the SHL to be eliminated by other optimizations. - SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), + SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), Op.getOperand(0)); unsigned ShVal = Op.getValueType().getSizeInBits()-1; - SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); + SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(), Sign, ShAmt)); } @@ -1179,7 +1179,7 @@ bool TargetLowering::SimplifyDemandedBits(SDOperand Op, /// computeMaskedBitsForTargetNode - Determine which of the bits specified /// in Mask are known to be either zero or one and return them in the /// KnownZero/KnownOne bitsets. -void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, +void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -1197,7 +1197,7 @@ void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, /// ComputeNumSignBitsForTargetNode - This method can be implemented by /// targets that want to expose additional information about sign bits to the /// DAG Combiner. -unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op, +unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, unsigned Depth) const { assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || @@ -1210,9 +1210,9 @@ unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDOperand Op, /// SimplifySetCC - Try to simplify a setcc built with the specified operands -/// and cc. If it is unable to simplify it, return a null SDOperand. -SDOperand -TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, +/// and cc. If it is unable to simplify it, return a null SDValue. 
+SDValue +TargetLowering::SimplifySetCC(MVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -1249,7 +1249,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, // (srl (ctlz x), 5) == 1 -> X == 0 Cond = ISD::SETEQ; } - SDOperand Zero = DAG.getConstant(0, N0.getValueType()); + SDValue Zero = DAG.getConstant(0, N0.getValueType()); return DAG.getSetCC(VT, N0.getOperand(0).getOperand(0), Zero, Cond); } @@ -1313,7 +1313,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits) return DAG.getConstant(Cond == ISD::SETNE, VT); - SDOperand ZextOp; + SDValue ZextOp; MVT Op0Ty = N0.getOperand(0).getValueType(); if (Op0Ty == ExtSrcTy) { ZextOp = N0.getOperand(0); @@ -1360,7 +1360,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, APInt::getHighBitsSet(BitWidth, BitWidth-1))) { // Okay, get the un-inverted input value. - SDOperand Val; + SDValue Val; if (N0.getOpcode() == ISD::XOR) Val = N0.getOperand(0); else { @@ -1467,7 +1467,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, if (isa<ConstantFPSDNode>(N0.Val)) { // Constant fold or commute setcc. - SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond); + SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond); if (O.Val) return O; } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.Val)) { // If the RHS of an FP comparison is a constant, simplify it away in @@ -1574,7 +1574,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, else if (N0.Val->hasOneUse()) { assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!"); // (Z-X) == X --> Z == X<<1 - SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), + SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(), N1, DAG.getConstant(1, getShiftAmountTy())); if (!DCI.isCalledByLegalizer()) @@ -1597,7 +1597,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, } else if (N1.Val->hasOneUse()) { assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!"); // X == (Z-X) --> X<<1 == Z - SDOperand SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0, + SDValue SH = DAG.getNode(ISD::SHL, N1.getValueType(), N0, DAG.getConstant(1, getShiftAmountTy())); if (!DCI.isCalledByLegalizer()) DCI.AddToWorklist(SH.Val); @@ -1608,7 +1608,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, } // Fold away ALL boolean setcc's. - SDOperand Temp; + SDValue Temp; if (N0.getValueType() == MVT::i1 && foldBooleans) { switch (Cond) { default: assert(0 && "Unknown integer setcc!"); @@ -1658,7 +1658,7 @@ TargetLowering::SimplifySetCC(MVT VT, SDOperand N0, SDOperand N1, } // Could not fold it. 
- return SDOperand(); + return SDValue(); } /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the @@ -1673,8 +1673,8 @@ bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA, } if (N->getOpcode() == ISD::ADD) { - SDOperand N1 = N->getOperand(0); - SDOperand N2 = N->getOperand(1); + SDValue N1 = N->getOperand(0); + SDValue N2 = N->getOperand(1); if (isGAPlusOffset(N1.Val, GA, Offset)) { ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); if (V) { @@ -1705,8 +1705,8 @@ bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base, if (VT.getSizeInBits() / 8 != Bytes) return false; - SDOperand Loc = LD->getOperand(1); - SDOperand BaseLoc = Base->getOperand(1); + SDValue Loc = LD->getOperand(1); + SDValue BaseLoc = Base->getOperand(1); if (Loc.getOpcode() == ISD::FrameIndex) { if (BaseLoc.getOpcode() != ISD::FrameIndex) return false; @@ -1730,10 +1730,10 @@ bool TargetLowering::isConsecutiveLoad(SDNode *LD, SDNode *Base, } -SDOperand TargetLowering:: +SDValue TargetLowering:: PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { // Default implementation: no optimization. - return SDOperand(); + return SDValue(); } //===----------------------------------------------------------------------===// @@ -1787,9 +1787,9 @@ const char *TargetLowering::LowerXConstraint(MVT ConstraintVT) const{ /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. -void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op, +void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter, - std::vector<SDOperand> &Ops, + std::vector<SDValue> &Ops, SelectionDAG &DAG) const { switch (ConstraintLetter) { default: break; @@ -1931,7 +1931,7 @@ static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { /// static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, const TargetLowering &TLI, - SDOperand Op, SelectionDAG *DAG) { + SDValue Op, SelectionDAG *DAG) { assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); unsigned BestIdx = 0; TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; @@ -1949,7 +1949,7 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, if (CType == TargetLowering::C_Other && Op.Val) { assert(OpInfo.Codes[i].size() == 1 && "Unhandled multi-letter 'other' constraint"); - std::vector<SDOperand> ResultOps; + std::vector<SDValue> ResultOps; TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], ResultOps, *DAG); if (!ResultOps.empty()) { @@ -1976,7 +1976,7 @@ static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, /// type to use for the specific AsmOperandInfo, setting /// OpInfo.ConstraintCode and OpInfo.ConstraintType. void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, - SDOperand Op, + SDValue Op, SelectionDAG *DAG) const { assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); @@ -2221,28 +2221,28 @@ static mu magicu64(uint64_t d) /// return a DAG expression to select that will generate the same value by /// multiplying by a magic number. See: /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> -SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, - std::vector<SDNode*>* Created) const { +SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, + std::vector<SDNode*>* Created) const { MVT VT = N->getValueType(0); // Check to see if we can do this. 
if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64)) - return SDOperand(); // BuildSDIV only operates on i32 or i64 + return SDValue(); // BuildSDIV only operates on i32 or i64 int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended(); ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d); // Multiply the numerator (operand 0) by the magic value - SDOperand Q; + SDValue Q; if (isOperationLegal(ISD::MULHS, VT)) Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0), DAG.getConstant(magics.m, VT)); else if (isOperationLegal(ISD::SMUL_LOHI, VT)) - Q = SDOperand(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT), + Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT), N->getOperand(0), DAG.getConstant(magics.m, VT)).Val, 1); else - return SDOperand(); // No mulhs or equvialent + return SDValue(); // No mulhs or equvialent // If d > 0 and m < 0, add the numerator if (d > 0 && magics.m < 0) { Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0)); @@ -2263,7 +2263,7 @@ SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, Created->push_back(Q.Val); } // Extract the sign bit and add it to the quotient - SDOperand T = + SDValue T = DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(VT.getSizeInBits()-1, getShiftAmountTy())); if (Created) @@ -2275,28 +2275,28 @@ SDOperand TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, /// return a DAG expression to select that will generate the same value by /// multiplying by a magic number. See: /// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html> -SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, - std::vector<SDNode*>* Created) const { +SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, + std::vector<SDNode*>* Created) const { MVT VT = N->getValueType(0); // Check to see if we can do this. if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64)) - return SDOperand(); // BuildUDIV only operates on i32 or i64 + return SDValue(); // BuildUDIV only operates on i32 or i64 uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue(); mu magics = (VT == MVT::i32) ? 
magicu32(d) : magicu64(d); // Multiply the numerator (operand 0) by the magic value - SDOperand Q; + SDValue Q; if (isOperationLegal(ISD::MULHU, VT)) Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0), DAG.getConstant(magics.m, VT)); else if (isOperationLegal(ISD::UMUL_LOHI, VT)) - Q = SDOperand(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT), + Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT), N->getOperand(0), DAG.getConstant(magics.m, VT)).Val, 1); else - return SDOperand(); // No mulhu or equvialent + return SDValue(); // No mulhu or equvialent if (Created) Created->push_back(Q.Val); @@ -2304,7 +2304,7 @@ SDOperand TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, return DAG.getNode(ISD::SRL, VT, Q, DAG.getConstant(magics.s, getShiftAmountTy())); } else { - SDOperand NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q); + SDValue NPQ = DAG.getNode(ISD::SUB, VT, N->getOperand(0), Q); if (Created) Created->push_back(NPQ.Val); NPQ = DAG.getNode(ISD::SRL, VT, NPQ, diff --git a/lib/Target/ARM/ARMAddressingModes.h b/lib/Target/ARM/ARMAddressingModes.h index 0189c00cea..2378485cd4 100644 --- a/lib/Target/ARM/ARMAddressingModes.h +++ b/lib/Target/ARM/ARMAddressingModes.h @@ -46,7 +46,7 @@ namespace ARM_AM { } } - static inline ShiftOpc getShiftOpcForNode(SDOperand N) { + static inline ShiftOpc getShiftOpcForNode(SDValue N) { switch (N.getOpcode()) { default: return ARM_AM::no_shift; case ISD::SHL: return ARM_AM::lsl; diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index 2d1136f436..b79fffe16f 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -53,38 +53,38 @@ public: return "ARM Instruction Selection"; } - SDNode *Select(SDOperand Op); + SDNode *Select(SDValue Op); virtual void InstructionSelect(SelectionDAG &DAG); - bool SelectAddrMode2(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Offset, SDOperand &Opc); - bool SelectAddrMode2Offset(SDOperand Op, SDOperand N, - SDOperand &Offset, SDOperand &Opc); - bool SelectAddrMode3(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Offset, SDOperand &Opc); - bool SelectAddrMode3Offset(SDOperand Op, SDOperand N, - SDOperand &Offset, SDOperand &Opc); - bool SelectAddrMode5(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Offset); - - bool SelectAddrModePC(SDOperand Op, SDOperand N, SDOperand &Offset, - SDOperand &Label); - - bool SelectThumbAddrModeRR(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Offset); - bool SelectThumbAddrModeRI5(SDOperand Op, SDOperand N, unsigned Scale, - SDOperand &Base, SDOperand &OffImm, - SDOperand &Offset); - bool SelectThumbAddrModeS1(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &OffImm, SDOperand &Offset); - bool SelectThumbAddrModeS2(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &OffImm, SDOperand &Offset); - bool SelectThumbAddrModeS4(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &OffImm, SDOperand &Offset); - bool SelectThumbAddrModeSP(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &OffImm); - - bool SelectShifterOperandReg(SDOperand Op, SDOperand N, SDOperand &A, - SDOperand &B, SDOperand &C); + bool SelectAddrMode2(SDValue Op, SDValue N, SDValue &Base, + SDValue &Offset, SDValue &Opc); + bool SelectAddrMode2Offset(SDValue Op, SDValue N, + SDValue &Offset, SDValue &Opc); + bool SelectAddrMode3(SDValue Op, SDValue N, SDValue &Base, + SDValue &Offset, SDValue &Opc); + bool SelectAddrMode3Offset(SDValue Op, SDValue N, + SDValue &Offset, SDValue &Opc); + 
bool SelectAddrMode5(SDValue Op, SDValue N, SDValue &Base, + SDValue &Offset); + + bool SelectAddrModePC(SDValue Op, SDValue N, SDValue &Offset, + SDValue &Label); + + bool SelectThumbAddrModeRR(SDValue Op, SDValue N, SDValue &Base, + SDValue &Offset); + bool SelectThumbAddrModeRI5(SDValue Op, SDValue N, unsigned Scale, + SDValue &Base, SDValue &OffImm, + SDValue &Offset); + bool SelectThumbAddrModeS1(SDValue Op, SDValue N, SDValue &Base, + SDValue &OffImm, SDValue &Offset); + bool SelectThumbAddrModeS2(SDValue Op, SDValue N, SDValue &Base, + SDValue &OffImm, SDValue &Offset); + bool SelectThumbAddrModeS4(SDValue Op, SDValue N, SDValue &Base, + SDValue &OffImm, SDValue &Offset); + bool SelectThumbAddrModeSP(SDValue Op, SDValue N, SDValue &Base, + SDValue &OffImm); + + bool SelectShifterOperandReg(SDValue Op, SDValue N, SDValue &A, + SDValue &B, SDValue &C); // Include the pieces autogenerated from the target description. #include "ARMGenDAGISel.inc" @@ -98,9 +98,9 @@ void ARMDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) { DAG.RemoveDeadNodes(); } -bool ARMDAGToDAGISel::SelectAddrMode2(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &Offset, - SDOperand &Opc) { +bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N, + SDValue &Base, SDValue &Offset, + SDValue &Opc) { if (N.getOpcode() == ISD::MUL) { if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { // X * [3,5,9] -> X + X * [2,4,8] etc. @@ -206,8 +206,8 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDOperand Op, SDOperand N, return true; } -bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDOperand Op, SDOperand N, - SDOperand &Offset, SDOperand &Opc) { +bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDValue Op, SDValue N, + SDValue &Offset, SDValue &Opc) { unsigned Opcode = Op.getOpcode(); ISD::MemIndexedMode AM = (Opcode == ISD::LOAD) ? cast<LoadSDNode>(Op)->getAddressingMode() @@ -245,9 +245,9 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDOperand Op, SDOperand N, } -bool ARMDAGToDAGISel::SelectAddrMode3(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &Offset, - SDOperand &Opc) { +bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N, + SDValue &Base, SDValue &Offset, + SDValue &Opc) { if (N.getOpcode() == ISD::SUB) { // X - C is canonicalize to X + -C, no need to handle it here. Base = N.getOperand(0); @@ -295,8 +295,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDOperand Op, SDOperand N, return true; } -bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDOperand Op, SDOperand N, - SDOperand &Offset, SDOperand &Opc) { +bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDValue Op, SDValue N, + SDValue &Offset, SDValue &Opc) { unsigned Opcode = Op.getOpcode(); ISD::MemIndexedMode AM = (Opcode == ISD::LOAD) ? 
cast<LoadSDNode>(Op)->getAddressingMode() @@ -318,8 +318,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDOperand Op, SDOperand N, } -bool ARMDAGToDAGISel::SelectAddrMode5(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &Offset) { +bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N, + SDValue &Base, SDValue &Offset) { if (N.getOpcode() != ISD::ADD) { Base = N; if (N.getOpcode() == ISD::FrameIndex) { @@ -364,11 +364,11 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDOperand Op, SDOperand N, return true; } -bool ARMDAGToDAGISel::SelectAddrModePC(SDOperand Op, SDOperand N, - SDOperand &Offset, SDOperand &Label) { +bool ARMDAGToDAGISel::SelectAddrModePC(SDValue Op, SDValue N, + SDValue &Offset, SDValue &Label) { if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) { Offset = N.getOperand(0); - SDOperand N1 = N.getOperand(1); + SDValue N1 = N.getOperand(1); Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getValue(), MVT::i32); return true; @@ -376,14 +376,14 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDOperand Op, SDOperand N, return false; } -bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &Offset){ +bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue Op, SDValue N, + SDValue &Base, SDValue &Offset){ if (N.getOpcode() != ISD::ADD) { Base = N; // We must materialize a zero in a reg! Returning an constant here won't // work since its node is -1 so it won't get added to the selection queue. // Explicitly issue a tMOVri8 node! - Offset = SDOperand(CurDAG->getTargetNode(ARM::tMOVi8, MVT::i32, + Offset = SDValue(CurDAG->getTargetNode(ARM::tMOVi8, MVT::i32, CurDAG->getTargetConstant(0, MVT::i32)), 0); return true; } @@ -394,11 +394,11 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDOperand Op, SDOperand N, } bool -ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDOperand Op, SDOperand N, - unsigned Scale, SDOperand &Base, - SDOperand &OffImm, SDOperand &Offset) { +ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N, + unsigned Scale, SDValue &Base, + SDValue &OffImm, SDValue &Offset) { if (Scale == 4) { - SDOperand TmpBase, TmpOffImm; + SDValue TmpBase, TmpOffImm; if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm)) return false; // We want to select tLDRspi / tSTRspi instead. 
if (N.getOpcode() == ARMISD::Wrapper && @@ -444,26 +444,26 @@ ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDOperand Op, SDOperand N, return true; } -bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &OffImm, - SDOperand &Offset) { +bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDValue Op, SDValue N, + SDValue &Base, SDValue &OffImm, + SDValue &Offset) { return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset); } -bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &OffImm, - SDOperand &Offset) { +bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDValue Op, SDValue N, + SDValue &Base, SDValue &OffImm, + SDValue &Offset) { return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset); } -bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &OffImm, - SDOperand &Offset) { +bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDValue Op, SDValue N, + SDValue &Base, SDValue &OffImm, + SDValue &Offset) { return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset); } -bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &OffImm) { +bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N, + SDValue &Base, SDValue &OffImm) { if (N.getOpcode() == ISD::FrameIndex) { int FI = cast<FrameIndexSDNode>(N)->getIndex(); Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); @@ -498,11 +498,11 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDOperand Op, SDOperand N, return false; } -bool ARMDAGToDAGISel::SelectShifterOperandReg(SDOperand Op, - SDOperand N, - SDOperand &BaseReg, - SDOperand &ShReg, - SDOperand &Opc) { +bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op, + SDValue N, + SDValue &BaseReg, + SDValue &ShReg, + SDValue &Opc) { ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N); // Don't match base register only case. That is matched to a separate @@ -523,12 +523,12 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDOperand Op, } /// getAL - Returns a ARMCC::AL immediate node. -static inline SDOperand getAL(SelectionDAG *CurDAG) { +static inline SDValue getAL(SelectionDAG *CurDAG) { return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32); } -SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { +SDNode *ARMDAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.Val; if (N->isMachineOpcode()) @@ -548,7 +548,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { ARM_AM::getSOImmVal(~Val) == -1 && // MVN !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs. if (UseCP) { - SDOperand CPIdx = + SDValue CPIdx = CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val), TLI.getPointerTy()); @@ -557,7 +557,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { ResNode = CurDAG->getTargetNode(ARM::tLDRcp, MVT::i32, MVT::Other, CPIdx, CurDAG->getEntryNode()); else { - SDOperand Ops[] = { + SDValue Ops[] = { CPIdx, CurDAG->getRegister(0, MVT::i32), CurDAG->getTargetConstant(0, MVT::i32), @@ -567,7 +567,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { }; ResNode=CurDAG->getTargetNode(ARM::LDRcp, MVT::i32, MVT::Other, Ops, 6); } - ReplaceUses(Op, SDOperand(ResNode, 0)); + ReplaceUses(Op, SDValue(ResNode, 0)); return NULL; } @@ -577,12 +577,12 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { case ISD::FrameIndex: { // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm. 
int FI = cast<FrameIndexSDNode>(N)->getIndex(); - SDOperand TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); + SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); if (Subtarget->isThumb()) return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI, CurDAG->getTargetConstant(0, MVT::i32)); else { - SDOperand Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32), + SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; return CurDAG->SelectNodeTo(N, ARM::ADDri, MVT::i32, Ops, 5); @@ -590,8 +590,8 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { } case ISD::ADD: { // Select add sp, c to tADDhirr. - SDOperand N0 = Op.getOperand(0); - SDOperand N1 = Op.getOperand(1); + SDValue N0 = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(Op.getOperand(0)); RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(Op.getOperand(1)); if (LHSR && LHSR->getReg() == ARM::SP) { @@ -612,20 +612,20 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { unsigned RHSV = C->getValue(); if (!RHSV) break; if (isPowerOf2_32(RHSV-1)) { // 2^n+1? - SDOperand V = Op.getOperand(0); + SDValue V = Op.getOperand(0); AddToISelQueue(V); unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV-1)); - SDOperand Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32), + SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32), CurDAG->getTargetConstant(ShImm, MVT::i32), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7); } if (isPowerOf2_32(RHSV+1)) { // 2^n-1? - SDOperand V = Op.getOperand(0); + SDValue V = Op.getOperand(0); AddToISelQueue(V); unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV+1)); - SDOperand Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32), + SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32), CurDAG->getTargetConstant(ShImm, MVT::i32), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; @@ -641,7 +641,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { case ISD::UMUL_LOHI: { AddToISelQueue(Op.getOperand(0)); AddToISelQueue(Op.getOperand(1)); - SDOperand Ops[] = { Op.getOperand(0), Op.getOperand(1), + SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; return CurDAG->getTargetNode(ARM::UMULL, MVT::i32, MVT::i32, Ops, 5); @@ -649,7 +649,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { case ISD::SMUL_LOHI: { AddToISelQueue(Op.getOperand(0)); AddToISelQueue(Op.getOperand(1)); - SDOperand Ops[] = { Op.getOperand(0), Op.getOperand(1), + SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1), getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), CurDAG->getRegister(0, MVT::i32) }; return CurDAG->getTargetNode(ARM::SMULL, MVT::i32, MVT::i32, Ops, 5); @@ -659,7 +659,7 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { ISD::MemIndexedMode AM = LD->getAddressingMode(); MVT LoadedVT = LD->getMemoryVT(); if (AM != ISD::UNINDEXED) { - SDOperand Offset, AMOpc; + SDValue Offset, AMOpc; bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC); unsigned Opcode = 0; bool Match = false; @@ -688,12 +688,12 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { } if (Match) { - SDOperand Chain = LD->getChain(); - SDOperand Base = LD->getBasePtr(); + SDValue Chain = LD->getChain(); + SDValue Base = LD->getBasePtr(); AddToISelQueue(Chain); 
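Editor's aside (not part of the commit): the selection code above turns a multiply by a constant of the form 2^n + 1 or 2^n - 1 into a single add/reverse-subtract with an LSL shifter operand (ADDrs and friends), avoiding a MUL. The identity it relies on is easy to check in isolation, independent of the DAG machinery:

// Editor's aside -- illustrative sketch only, not part of this commit.
// x * (2^n + 1) == x + (x << n)  and  x * (2^n - 1) == (x << n) - x,
// which is exactly what an add/rsb with an LSL shifter operand computes.
#include <cassert>
#include <cstdint>

static uint32_t mul_pow2_plus_1(uint32_t x, unsigned n)  { return x + (x << n); }
static uint32_t mul_pow2_minus_1(uint32_t x, unsigned n) { return (x << n) - x; }

int main() {
  for (uint32_t x : {0u, 1u, 7u, 12345u, 0xFFFFFFFFu})
    for (unsigned n = 1; n < 31; ++n) {
      assert(mul_pow2_plus_1(x, n)  == x * ((1u << n) + 1));
      assert(mul_pow2_minus_1(x, n) == x * ((1u << n) - 1));
    }
  return 0;
}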
AddToISelQueue(Base); AddToISelQueue(Offset); - SDOperand Ops[]= { Base, Offset, AMOpc, getAL(CurDAG), + SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG), CurDAG->getRegister(0, MVT::i32), Chain }; return CurDAG->getTargetNode(Opcode, MVT::i32, MVT::i32, MVT::Other, Ops, 6); @@ -712,11 +712,11 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { // Pattern complexity = 6 cost = 1 size = 0 unsigned Opc = Subtarget->isThumb() ? ARM::tBcc : ARM::Bcc; - SDOperand Chain = Op.getOperand(0); - SDOperand N1 = Op.getOperand(1); - SDOperand N2 = Op.getOperand(2); - SDOperand N3 = Op.getOperand(3); - SDOperand InFlag = Op.getOperand(4); + SDValue Chain = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); + SDValue N2 = Op.getOperand(2); + SDValue N3 = Op.getOperand(3); + SDValue InFlag = Op.getOperand(4); assert(N1.getOpcode() == ISD::BasicBlock); assert(N2.getOpcode() == ISD::Constant); assert(N3.getOpcode() == ISD::Register); @@ -724,35 +724,35 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { AddToISelQueue(Chain); AddToISelQueue(N1); AddToISelQueue(InFlag); - SDOperand Tmp2 = CurDAG->getTargetConstant(((unsigned) + SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) cast<ConstantSDNode>(N2)->getValue()), MVT::i32); - SDOperand Ops[] = { N1, Tmp2, N3, Chain, InFlag }; + SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag }; SDNode *ResNode = CurDAG->getTargetNode(Opc, MVT::Other, MVT::Flag, Ops, 5); - Chain = SDOperand(ResNode, 0); + Chain = SDValue(ResNode, 0); if (Op.Val->getNumValues() == 2) { - InFlag = SDOperand(ResNode, 1); - ReplaceUses(SDOperand(Op.Val, 1), InFlag); + InFlag = SDValue(ResNode, 1); + ReplaceUses(SDValue(Op.Val, 1), InFlag); } - ReplaceUses(SDOperand(Op.Val, 0), SDOperand(Chain.Val, Chain.ResNo)); + ReplaceUses(SDValue(Op.Val, 0), SDValue(Chain.Val, Chain.ResNo)); return NULL; } case ARMISD::CMOV: { bool isThumb = Subtarget->isThumb(); MVT VT = Op.getValueType(); - SDOperand N0 = Op.getOperand(0); - SDOperand N1 = Op.getOperand(1); - SDOperand N2 = Op.getOperand(2); - SDOperand N3 = Op.getOperand(3); - SDOperand InFlag = Op.getOperand(4); + SDValue N0 = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); + SDValue N2 = Op.getOperand(2); + SDValue N3 = Op.getOperand(3); + SDValue InFlag = Op.getOperand(4); assert(N2.getOpcode() == ISD::Constant); assert(N3.getOpcode() == ISD::Register); // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc) // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc) // Pattern complexity = 18 cost = 1 size = 0 - SDOperand CPTmp0; - SDOperand CPTmp1; - SDOperand CPTmp2; + SDValue CPTmp0; + SDValue CPTmp1; + SDValue CPTmp2; if (!isThumb && VT == MVT::i32 && SelectShifterOperandReg(Op, N1, CPTmp0, CPTmp1, CPTmp2)) { AddToISelQueue(N0); @@ -760,9 +760,9 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { AddToISelQueue(CPTmp1); AddToISelQueue(CPTmp2); AddToISelQueue(InFlag); - SDOperand Tmp2 = CurDAG->getTargetConstant(((unsigned) + SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) cast<ConstantSDNode>(N2)->getValue()), MVT::i32); - SDOperand Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag }; + SDValue Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag }; return CurDAG->SelectNodeTo(Op.Val, ARM::MOVCCs, MVT::i32, Ops, 7); } @@ -777,12 +777,12 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { Predicate_so_imm(N3.Val)) { AddToISelQueue(N0); AddToISelQueue(InFlag); - SDOperand Tmp1 = CurDAG->getTargetConstant(((unsigned) + SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned) 
cast<ConstantSDNode>(N1)->getValue()), MVT::i32); Tmp1 = Transform_so_imm_XFORM(Tmp1.Val); - SDOperand Tmp2 = CurDAG->getTargetConstant(((unsigned) + SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) cast<ConstantSDNode>(N2)->getValue()), MVT::i32); - SDOperand Ops[] = { N0, Tmp1, Tmp2, N3, InFlag }; + SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag }; return CurDAG->SelectNodeTo(Op.Val, ARM::MOVCCi, MVT::i32, Ops, 5); } @@ -798,9 +798,9 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { AddToISelQueue(N0); AddToISelQueue(N1); AddToISelQueue(InFlag); - SDOperand Tmp2 = CurDAG->getTargetConstant(((unsigned) + SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) cast<ConstantSDNode>(N2)->getValue()), MVT::i32); - SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag }; + SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag }; unsigned Opc = 0; switch (VT.getSimpleVT()) { default: assert(false && "Illegal conditional move type!"); @@ -819,20 +819,20 @@ SDNode *ARMDAGToDAGISel::Select(SDOperand Op) { } case ARMISD::CNEG: { MVT VT = Op.getValueType(); - SDOperand N0 = Op.getOperand(0); - SDOperand N1 = Op.getOperand(1); - SDOperand N2 = Op.getOperand(2); - SDOperand N3 = Op.getOperand(3); - SDOperand InFlag = Op.getOperand(4); + SDValue N0 = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); + SDValue N2 = Op.getOperand(2); + SDValue N3 = Op.getOperand(3); + SDValue InFlag = Op.getOperand(4); assert(N2.getOpcode() == ISD::Constant); assert(N3.getOpcode() == ISD::Register); AddToISelQueue(N0); AddToISelQueue(N1); AddToISelQueue(InFlag); - SDOperand Tmp2 = CurDAG->getTargetConstant(((unsigned) + SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned) cast<ConstantSDNode>(N2)->getValue()), MVT::i32); - SDOperand Ops[] = { N0, N1, Tmp2, N3, InFlag }; + SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag }; unsigned Opc = 0; switch (VT.getSimpleVT()) { default: assert(false && "Illegal conditional move type!"); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index afc7ebc4c9..c696832fda 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -399,13 +399,13 @@ HowToPassArgument(MVT ObjectVT, unsigned NumGPRs, /// LowerCALL - Lowering a ISD::CALL node into a callseq_start <- /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter /// nodes. -SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { +SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { MVT RetVT= Op.Val->getValueType(0); - SDOperand Chain = Op.getOperand(0); + SDValue Chain = Op.getOperand(0); unsigned CallConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); assert((CallConv == CallingConv::C || CallConv == CallingConv::Fast) && "unknown calling convention"); - SDOperand Callee = Op.getOperand(4); + SDValue Callee = Op.getOperand(4); unsigned NumOps = (Op.getNumOperands() - 5) / 2; unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot unsigned NumGPRs = 0; // GPRs used for parameter passing. 
@@ -433,17 +433,17 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, MVT::i32)); - SDOperand StackPtr = DAG.getRegister(ARM::SP, MVT::i32); + SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32); static const unsigned GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 }; NumGPRs = 0; - std::vector<std::pair<unsigned, SDOperand> > RegsToPass; - std::vector<SDOperand> MemOpChains; + std::vector<std::pair<unsigned, SDValue> > RegsToPass; + std::vector<SDValue> MemOpChains; for (unsigned i = 0; i != NumOps; ++i) { - SDOperand Arg = Op.getOperand(5+2*i); + SDValue Arg = Op.getOperand(5+2*i); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); MVT ArgVT = Arg.getValueType(); @@ -467,22 +467,22 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Arg))); break; case MVT::i64: { - SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg, + SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg, DAG.getConstant(0, getPointerTy())); - SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg, + SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Arg, DAG.getConstant(1, getPointerTy())); RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Lo)); if (ObjGPRs == 2) RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1], Hi)); else { - SDOperand PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType()); + SDValue PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType()); PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); MemOpChains.push_back(DAG.getStore(Chain, Hi, PtrOff, NULL, 0)); } break; } case MVT::f64: { - SDOperand Cvt = DAG.getNode(ARMISD::FMRRD, + SDValue Cvt = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1); RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Cvt)); @@ -490,7 +490,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1], Cvt.getValue(1))); else { - SDOperand PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType()); + SDValue PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType()); PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); MemOpChains.push_back(DAG.getStore(Chain, Cvt.getValue(1), PtrOff, NULL, 0)); @@ -500,7 +500,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { } } else { assert(ObjSize != 0); - SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); + SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); } @@ -515,7 +515,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { // Build a sequence of copy-to-reg nodes chained together with token chain // and flag operands which copy the outgoing args into the appropriate regs. 
- SDOperand InFlag; + SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, InFlag); @@ -542,10 +542,10 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) { ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex, ARMCP::CPStub, 4); - SDOperand CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 2); + SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 2); CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr); Callee = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), CPAddr, NULL, 0); - SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); + SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); Callee = DAG.getNode(ARMISD::PIC_ADD, getPointerTy(), Callee, PICLabel); } else Callee = DAG.getTargetGlobalAddress(GV, getPointerTy()); @@ -559,10 +559,10 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) { ARMConstantPoolValue *CPV = new ARMConstantPoolValue(Sym, ARMPCLabelIndex, ARMCP::CPStub, 4); - SDOperand CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 2); + SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 2); CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr); Callee = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), CPAddr, NULL, 0); - SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); + SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); Callee = DAG.getNode(ARMISD::PIC_ADD, getPointerTy(), Callee, PICLabel); } else Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy()); @@ -587,7 +587,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { InFlag = Chain.getValue(1); } - std::vector<SDOperand> Ops; + std::vector<SDValue> Ops; Ops.push_back(Chain); Ops.push_back(Callee); @@ -611,7 +611,7 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { if (RetVT != MVT::Other) InFlag = Chain.getValue(1); - std::vector<SDOperand> ResultVals; + std::vector<SDValue> ResultVals; // If the call has results, copy the values out of the ret val registers. 
switch (RetVT.getSimpleVT()) { @@ -634,8 +634,8 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { Chain.getValue(0))); break; case MVT::f64: { - SDOperand Lo = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag); - SDOperand Hi = DAG.getCopyFromReg(Lo, ARM::R1, MVT::i32, Lo.getValue(2)); + SDValue Lo = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag); + SDValue Hi = DAG.getCopyFromReg(Lo, ARM::R1, MVT::i32, Lo.getValue(2)); ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, MVT::f64, Lo, Hi)); break; } @@ -645,19 +645,19 @@ SDOperand ARMTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { return Chain; ResultVals.push_back(Chain); - SDOperand Res = DAG.getMergeValues(&ResultVals[0], ResultVals.size()); + SDValue Res = DAG.getMergeValues(&ResultVals[0], ResultVals.size()); return Res.getValue(Op.ResNo); } -static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { - SDOperand Copy; - SDOperand Chain = Op.getOperand(0); +static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) { + SDValue Copy; + SDValue Chain = Op.getOperand(0); switch(Op.getNumOperands()) { default: assert(0 && "Do not know how to return this many arguments!"); abort(); case 1: { - SDOperand LR = DAG.getRegister(ARM::LR, MVT::i32); + SDValue LR = DAG.getRegister(ARM::LR, MVT::i32); return DAG.getNode(ARMISD::RET_FLAG, MVT::Other, Chain); } case 3: @@ -668,16 +668,16 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is // available. Op = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32), &Op,1); - SDOperand Sign = DAG.getConstant(0, MVT::i32); + SDValue Sign = DAG.getConstant(0, MVT::i32); return DAG.getNode(ISD::RET, MVT::Other, Chain, Op, Sign, Op.getValue(1), Sign); } - Copy = DAG.getCopyToReg(Chain, ARM::R0, Op, SDOperand()); + Copy = DAG.getCopyToReg(Chain, ARM::R0, Op, SDValue()); if (DAG.getMachineFunction().getRegInfo().liveout_empty()) DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R0); break; case 5: - Copy = DAG.getCopyToReg(Chain, ARM::R1, Op.getOperand(3), SDOperand()); + Copy = DAG.getCopyToReg(Chain, ARM::R1, Op.getOperand(3), SDValue()); Copy = DAG.getCopyToReg(Copy, ARM::R0, Op.getOperand(1), Copy.getValue(1)); // If we haven't noted the R0+R1 are live out, do so now. if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { @@ -686,7 +686,7 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { } break; case 9: // i128 -> 4 regs - Copy = DAG.getCopyToReg(Chain, ARM::R3, Op.getOperand(7), SDOperand()); + Copy = DAG.getCopyToReg(Chain, ARM::R3, Op.getOperand(7), SDValue()); Copy = DAG.getCopyToReg(Copy , ARM::R2, Op.getOperand(5), Copy.getValue(1)); Copy = DAG.getCopyToReg(Copy , ARM::R1, Op.getOperand(3), Copy.getValue(1)); Copy = DAG.getCopyToReg(Copy , ARM::R0, Op.getOperand(1), Copy.getValue(1)); @@ -711,10 +711,10 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only // be used to form addressing mode. These wrapped nodes will be selected // into MOVi. 
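Editor's aside (not part of the commit): in the LowerCALL and LowerRET hunks above, an f64 travels through a pair of i32 registers (FMRRD to split, FMDRR to rebuild), matching the soft-float convention where doubles live in GPR pairs such as R0/R1. Below is a value-level sketch of that split and rejoin; the lo/hi register order shown is an assumption for illustration, since the real assignment is ABI- and endian-dependent.

// Editor's aside -- illustrative sketch only, not part of this commit.
// The lo/hi ordering here is assumed for illustration.
#include <cassert>
#include <cstdint>
#include <cstring>

struct RegPair { uint32_t lo, hi; };   // stand-ins for, e.g., R0/R1

static RegPair splitDouble(double d) { // ~ ARMISD::FMRRD
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return { uint32_t(bits), uint32_t(bits >> 32) };
}

static double joinDouble(RegPair r) {  // ~ ARMISD::FMDRR
  uint64_t bits = (uint64_t(r.hi) << 32) | r.lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}

int main() {
  for (double d : {0.0, -1.5, 3.141592653589793, 1e300})
    assert(joinDouble(splitDouble(d)) == d);
  return 0;
}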
-static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); - SDOperand Res; + SDValue Res; if (CP->isMachineConstantPoolEntry()) Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlignment()); @@ -725,7 +725,7 @@ static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { } // Lower ISD::GlobalTLSAddress using the "general dynamic" model -SDOperand +SDValue ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG) { MVT PtrVT = getPointerTy(); @@ -733,12 +733,12 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GA->getGlobal(), ARMPCLabelIndex, ARMCP::CPValue, PCAdj, "tlsgd", true); - SDOperand Argument = DAG.getTargetConstantPool(CPV, PtrVT, 2); + SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 2); Argument = DAG.getNode(ARMISD::Wrapper, MVT::i32, Argument); Argument = DAG.getLoad(PtrVT, DAG.getEntryNode(), Argument, NULL, 0); - SDOperand Chain = Argument.getValue(1); + SDValue Chain = Argument.getValue(1); - SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); + SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); Argument = DAG.getNode(ARMISD::PIC_ADD, PtrVT, Argument, PICLabel); // call __tls_get_addr. @@ -747,7 +747,7 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, Entry.Node = Argument; Entry.Ty = (const Type *) Type::Int32Ty; Args.push_back(Entry); - std::pair<SDOperand, SDOperand> CallResult = + std::pair<SDValue, SDValue> CallResult = LowerCallTo(Chain, (const Type *) Type::Int32Ty, false, false, false, CallingConv::C, false, DAG.getExternalSymbol("__tls_get_addr", PtrVT), Args, DAG); @@ -756,15 +756,15 @@ ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, // Lower ISD::GlobalTLSAddress using the "initial exec" or // "local exec" model. 
-SDOperand +SDValue ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, SelectionDAG &DAG) { GlobalValue *GV = GA->getGlobal(); - SDOperand Offset; - SDOperand Chain = DAG.getEntryNode(); + SDValue Offset; + SDValue Chain = DAG.getEntryNode(); MVT PtrVT = getPointerTy(); // Get the Thread Pointer - SDOperand ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, PtrVT); + SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, PtrVT); if (GV->isDeclaration()){ // initial exec model @@ -777,7 +777,7 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, Offset = DAG.getLoad(PtrVT, Chain, Offset, NULL, 0); Chain = Offset.getValue(1); - SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); + SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); Offset = DAG.getNode(ARMISD::PIC_ADD, PtrVT, Offset, PICLabel); Offset = DAG.getLoad(PtrVT, Chain, Offset, NULL, 0); @@ -795,8 +795,8 @@ ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA, return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); } -SDOperand -ARMTargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { +SDValue +ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) { // TODO: implement the "local dynamic" model assert(Subtarget->isTargetELF() && "TLS not implemented for non-ELF targets"); @@ -809,7 +809,7 @@ ARMTargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { return LowerToTLSExecModels(GA, DAG); } -SDOperand ARMTargetLowering::LowerGlobalAddressELF(SDOperand Op, +SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = getPointerTy(); GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); @@ -818,17 +818,17 @@ SDOperand ARMTargetLowering::LowerGlobalAddressELF(SDOperand Op, bool UseGOTOFF = GV->hasInternalLinkage() || GV->hasHiddenVisibility(); ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMCP::CPValue, UseGOTOFF ? 
"GOTOFF":"GOT"); - SDOperand CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2); + SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2); CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr); - SDOperand Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0); - SDOperand Chain = Result.getValue(1); - SDOperand GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, PtrVT); + SDValue Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0); + SDValue Chain = Result.getValue(1); + SDValue GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, PtrVT); Result = DAG.getNode(ISD::ADD, PtrVT, Result, GOT); if (!UseGOTOFF) Result = DAG.getLoad(PtrVT, Chain, Result, NULL, 0); return Result; } else { - SDOperand CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2); + SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2); CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr); return DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0); } @@ -842,13 +842,13 @@ static bool GVIsIndirectSymbol(GlobalValue *GV, Reloc::Model RelocM) { (GV->isDeclaration() && !GV->hasNotBeenReadFromBitcode())); } -SDOperand ARMTargetLowering::LowerGlobalAddressDarwin(SDOperand Op, +SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = getPointerTy(); GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); Reloc::Model RelocM = getTargetMachine().getRelocationModel(); bool IsIndirect = GVIsIndirectSymbol(GV, RelocM); - SDOperand CPAddr; + SDValue CPAddr; if (RelocM == Reloc::Static) CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2); else { @@ -862,11 +862,11 @@ SDOperand ARMTargetLowering::LowerGlobalAddressDarwin(SDOperand Op, } CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr); - SDOperand Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0); - SDOperand Chain = Result.getValue(1); + SDValue Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0); + SDValue Chain = Result.getValue(1); if (RelocM == Reloc::PIC_) { - SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); + SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); Result = DAG.getNode(ARMISD::PIC_ADD, PtrVT, Result, PICLabel); } if (IsIndirect) @@ -875,7 +875,7 @@ SDOperand ARMTargetLowering::LowerGlobalAddressDarwin(SDOperand Op, return Result; } -SDOperand ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDOperand Op, +SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG){ assert(Subtarget->isTargetELF() && "GLOBAL OFFSET TABLE not implemented for non-ELF targets"); @@ -884,40 +884,40 @@ SDOperand ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDOperand Op, ARMConstantPoolValue *CPV = new ARMConstantPoolValue("_GLOBAL_OFFSET_TABLE_", ARMPCLabelIndex, ARMCP::CPValue, PCAdj); - SDOperand CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2); + SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 2); CPAddr = DAG.getNode(ARMISD::Wrapper, MVT::i32, CPAddr); - SDOperand Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0); - SDOperand PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); + SDValue Result = DAG.getLoad(PtrVT, DAG.getEntryNode(), CPAddr, NULL, 0); + SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32); return DAG.getNode(ARMISD::PIC_ADD, PtrVT, Result, PICLabel); } -static SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); unsigned IntNo = 
cast<ConstantSDNode>(Op.getOperand(0))->getValue(); switch (IntNo) { - default: return SDOperand(); // Don't custom lower most intrinsics. + default: return SDValue(); // Don't custom lower most intrinsics. case Intrinsic::arm_thread_pointer: return DAG.getNode(ARMISD::THREAD_POINTER, PtrVT); } } -static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, +static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, unsigned VarArgsFrameIndex) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); + SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); } -static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG, +static SDValue LowerFORMAL_ARGUMENT(SDValue Op, SelectionDAG &DAG, unsigned ArgNo, unsigned &NumGPRs, unsigned &ArgOffset) { MachineFunction &MF = DAG.getMachineFunction(); MVT ObjectVT = Op.getValue(ArgNo).getValueType(); - SDOperand Root = Op.getOperand(0); - std::vector<SDOperand> ArgValues; + SDValue Root = Op.getOperand(0); + std::vector<SDValue> ArgValues; MachineRegisterInfo &RegInfo = MF.getRegInfo(); static const unsigned GPRArgRegs[] = { @@ -935,7 +935,7 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG, NumGPRs += GPRPad; ArgOffset += StackPad; - SDOperand ArgValue; + SDValue ArgValue; if (ObjGPRs == 1) { unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass); RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg); @@ -949,7 +949,7 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG, VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass); RegInfo.addLiveIn(GPRArgRegs[NumGPRs+1], VReg); - SDOperand ArgValue2 = DAG.getCopyFromReg(Root, VReg, MVT::i32); + SDValue ArgValue2 = DAG.getCopyFromReg(Root, VReg, MVT::i32); assert(ObjectVT != MVT::i64 && "i64 should already be lowered"); ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2); @@ -959,11 +959,11 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG, if (ObjSize) { MachineFrameInfo *MFI = MF.getFrameInfo(); int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); - SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32); + SDValue FIN = DAG.getFrameIndex(FI, MVT::i32); if (ObjGPRs == 0) ArgValue = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); else { - SDOperand ArgValue2 = DAG.getLoad(MVT::i32, Root, FIN, NULL, 0); + SDValue ArgValue2 = DAG.getLoad(MVT::i32, Root, FIN, NULL, 0); assert(ObjectVT != MVT::i64 && "i64 should already be lowered"); ArgValue = DAG.getNode(ARMISD::FMDRR, MVT::f64, ArgValue, ArgValue2); } @@ -974,10 +974,10 @@ static SDOperand LowerFORMAL_ARGUMENT(SDOperand Op, SelectionDAG &DAG, return ArgValue; } -SDOperand -ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { - std::vector<SDOperand> ArgValues; - SDOperand Root = Op.getOperand(0); +SDValue +ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { + std::vector<SDValue> ArgValues; + SDValue Root = Op.getOperand(0); unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot unsigned NumGPRs = 0; // GPRs used for parameter passing. 
@@ -1006,14 +1006,14 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { AFI->setVarArgsRegSaveSize(VARegSaveSize); VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset + VARegSaveSize - VARegSize); - SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); + SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); - SmallVector<SDOperand, 4> MemOps; + SmallVector<SDValue, 4> MemOps; for (; NumGPRs < 4; ++NumGPRs) { unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass); RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i32); - SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); + SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::i32); + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getConstant(4, getPointerTy())); @@ -1034,13 +1034,13 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { } /// isFloatingPointZero - Return true if this is +0.0. -static bool isFloatingPointZero(SDOperand Op) { +static bool isFloatingPointZero(SDValue Op) { if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) return CFP->getValueAPF().isPosZero(); else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) { // Maybe this has already been legalized into the constant pool? if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { - SDOperand WrapperOp = Op.getOperand(1).getOperand(0); + SDValue WrapperOp = Op.getOperand(1).getOperand(0); if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp)) if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal())) return CFP->getValueAPF().isPosZero(); @@ -1056,8 +1056,8 @@ static bool isLegalCmpImmediate(unsigned C, bool isThumb) { /// Returns appropriate ARM CMP (cmp) and corresponding condition code for /// the given operands. -static SDOperand getARMCmp(SDOperand LHS, SDOperand RHS, ISD::CondCode CC, - SDOperand &ARMCC, SelectionDAG &DAG, bool isThumb) { +static SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, + SDValue &ARMCC, SelectionDAG &DAG, bool isThumb) { if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.Val)) { unsigned C = RHSC->getValue(); if (!isLegalCmpImmediate(C, isThumb)) { @@ -1115,8 +1115,8 @@ static SDOperand getARMCmp(SDOperand LHS, SDOperand RHS, ISD::CondCode CC, } /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. 
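Editor's aside (not part of the commit): getARMCmp above falls back to tweaking the condition code (for example x < C becomes x <= C-1, when C-1 does not wrap) whenever the compare constant is not a legal ARM immediate. The legality test it leans on is the usual ARM-mode rule that a data-processing immediate is an 8-bit value rotated right by an even amount (Thumb compares take a plain 8-bit immediate instead). A sketch of just that predicate:

// Editor's aside -- illustrative sketch only, not part of this commit.
// Simplified ARM-mode "so_imm" test: an 8-bit value rotated right by an
// even amount 0..30.
#include <cassert>
#include <cstdint>

static bool isSOImm(uint32_t v) {
  for (unsigned rot = 0; rot < 32; rot += 2) {
    // Rotate v left by 'rot'; if the result fits in 8 bits, then v is
    // that 8-bit value rotated right by 'rot'.
    uint32_t rotated = (v << rot) | (rot ? (v >> (32 - rot)) : 0);
    if (rotated <= 0xFF)
      return true;
  }
  return false;
}

int main() {
  assert(isSOImm(0xFF));        // plain 8-bit value, rotation 0
  assert(isSOImm(0xFF000000));  // 0xFF rotated right by 8
  assert(isSOImm(0x3FC));       // 0xFF rotated right by 30
  assert(!isSOImm(0x101));      // needs 9 significant bits
  assert(!isSOImm(0xFF0000FF)); // 16 contiguous bits set: too wide
  return 0;
}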
-static SDOperand getVFPCmp(SDOperand LHS, SDOperand RHS, SelectionDAG &DAG) { - SDOperand Cmp; +static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG) { + SDValue Cmp; if (!isFloatingPointZero(RHS)) Cmp = DAG.getNode(ARMISD::CMPFP, MVT::Flag, LHS, RHS); else @@ -1124,19 +1124,19 @@ static SDOperand getVFPCmp(SDOperand LHS, SDOperand RHS, SelectionDAG &DAG) { return DAG.getNode(ARMISD::FMSTAT, MVT::Flag, Cmp); } -static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG, +static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST) { MVT VT = Op.getValueType(); - SDOperand LHS = Op.getOperand(0); - SDOperand RHS = Op.getOperand(1); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); - SDOperand TrueVal = Op.getOperand(2); - SDOperand FalseVal = Op.getOperand(3); + SDValue TrueVal = Op.getOperand(2); + SDValue FalseVal = Op.getOperand(3); if (LHS.getValueType() == MVT::i32) { - SDOperand ARMCC; - SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32); - SDOperand Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb()); + SDValue ARMCC; + SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); + SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb()); return DAG.getNode(ARMISD::CMOV, VT, FalseVal, TrueVal, ARMCC, CCR, Cmp); } @@ -1144,32 +1144,32 @@ static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG, if (FPCCToARMCC(CC, CondCode, CondCode2)) std::swap(TrueVal, FalseVal); - SDOperand ARMCC = DAG.getConstant(CondCode, MVT::i32); - SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32); - SDOperand Cmp = getVFPCmp(LHS, RHS, DAG); - SDOperand Result = DAG.getNode(ARMISD::CMOV, VT, FalseVal, TrueVal, + SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32); + SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); + SDValue Cmp = getVFPCmp(LHS, RHS, DAG); + SDValue Result = DAG.getNode(ARMISD::CMOV, VT, FalseVal, TrueVal, ARMCC, CCR, Cmp); if (CondCode2 != ARMCC::AL) { - SDOperand ARMCC2 = DAG.getConstant(CondCode2, MVT::i32); + SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32); // FIXME: Needs another CMP because flag can have but one use. - SDOperand Cmp2 = getVFPCmp(LHS, RHS, DAG); + SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG); Result = DAG.getNode(ARMISD::CMOV, VT, Result, TrueVal, ARMCC2, CCR, Cmp2); } return Result; } -static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG, +static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST) { - SDOperand Chain = Op.getOperand(0); + SDValue Chain = Op.getOperand(0); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); - SDOperand LHS = Op.getOperand(2); - SDOperand RHS = Op.getOperand(3); - SDOperand Dest = Op.getOperand(4); + SDValue LHS = Op.getOperand(2); + SDValue RHS = Op.getOperand(3); + SDValue Dest = Op.getOperand(4); if (LHS.getValueType() == MVT::i32) { - SDOperand ARMCC; - SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32); - SDOperand Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb()); + SDValue ARMCC; + SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); + SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb()); return DAG.getNode(ARMISD::BRCOND, MVT::Other, Chain, Dest, ARMCC, CCR,Cmp); } @@ -1179,33 +1179,33 @@ static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG, // Swap the LHS/RHS of the comparison if needed. 
std::swap(LHS, RHS); - SDOperand Cmp = getVFPCmp(LHS, RHS, DAG); - SDOperand ARMCC = DAG.getConstant(CondCode, MVT::i32); - SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32); + SDValue Cmp = getVFPCmp(LHS, RHS, DAG); + SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32); + SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Flag); - SDOperand Ops[] = { Chain, Dest, ARMCC, CCR, Cmp }; - SDOperand Res = DAG.getNode(ARMISD::BRCOND, VTList, Ops, 5); + SDValue Ops[] = { Chain, Dest, ARMCC, CCR, Cmp }; + SDValue Res = DAG.getNode(ARMISD::BRCOND, VTList, Ops, 5); if (CondCode2 != ARMCC::AL) { ARMCC = DAG.getConstant(CondCode2, MVT::i32); - SDOperand Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) }; + SDValue Ops[] = { Res, Dest, ARMCC, CCR, Res.getValue(1) }; Res = DAG.getNode(ARMISD::BRCOND, VTList, Ops, 5); } return Res; } -SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) { - SDOperand Chain = Op.getOperand(0); - SDOperand Table = Op.getOperand(1); - SDOperand Index = Op.getOperand(2); +SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) { + SDValue Chain = Op.getOperand(0); + SDValue Table = Op.getOperand(1); + SDValue Index = Op.getOperand(2); MVT PTy = getPointerTy(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Table); ARMFunctionInfo *AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>(); - SDOperand UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); - SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); + SDValue UId = DAG.getConstant(AFI->createJumpTableUId(), PTy); + SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); Table = DAG.getNode(ARMISD::WrapperJT, MVT::i32, JTI, UId); Index = DAG.getNode(ISD::MUL, PTy, Index, DAG.getConstant(4, PTy)); - SDOperand Addr = DAG.getNode(ISD::ADD, PTy, Index, Table); + SDValue Addr = DAG.getNode(ISD::ADD, PTy, Index, Table); bool isPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_; Addr = DAG.getLoad(isPIC ? (MVT)MVT::i32 : PTy, Chain, Addr, NULL, 0); @@ -1215,14 +1215,14 @@ SDOperand ARMTargetLowering::LowerBR_JT(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(ARMISD::BR_JT, MVT::Other, Chain, Addr, JTI, UId); } -static SDOperand LowerFP_TO_INT(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) { unsigned Opc = Op.getOpcode() == ISD::FP_TO_SINT ? ARMISD::FTOSI : ARMISD::FTOUI; Op = DAG.getNode(Opc, MVT::f32, Op.getOperand(0)); return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op); } -static SDOperand LowerINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); unsigned Opc = Op.getOpcode() == ISD::SINT_TO_FP ? ARMISD::SITOF : ARMISD::UITOF; @@ -1231,39 +1231,39 @@ static SDOperand LowerINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(Opc, VT, Op); } -static SDOperand LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) { // Implement fcopysign with a fabs and a conditional fneg. 
- SDOperand Tmp0 = Op.getOperand(0); - SDOperand Tmp1 = Op.getOperand(1); + SDValue Tmp0 = Op.getOperand(0); + SDValue Tmp1 = Op.getOperand(1); MVT VT = Op.getValueType(); MVT SrcVT = Tmp1.getValueType(); - SDOperand AbsVal = DAG.getNode(ISD::FABS, VT, Tmp0); - SDOperand Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG); - SDOperand ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32); - SDOperand CCR = DAG.getRegister(ARM::CPSR, MVT::i32); + SDValue AbsVal = DAG.getNode(ISD::FABS, VT, Tmp0); + SDValue Cmp = getVFPCmp(Tmp1, DAG.getConstantFP(0.0, SrcVT), DAG); + SDValue ARMCC = DAG.getConstant(ARMCC::LT, MVT::i32); + SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); return DAG.getNode(ARMISD::CNEG, VT, AbsVal, AbsVal, ARMCC, CCR, Cmp); } -SDOperand +SDValue ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, - SDOperand Chain, - SDOperand Dst, SDOperand Src, - SDOperand Size, unsigned Align, + SDValue Chain, + SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, bool AlwaysInline, const Value *DstSV, uint64_t DstSVOff, const Value *SrcSV, uint64_t SrcSVOff){ // Do repeated 4-byte loads and stores. To be improved. // This requires 4-byte alignment. if ((Align & 3) != 0) - return SDOperand(); + return SDValue(); // This requires the copy size to be a constant, preferrably // within a subtarget-specific limit. ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); if (!ConstantSize) - return SDOperand(); + return SDValue(); uint64_t SizeVal = ConstantSize->getValue(); if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold()) - return SDOperand(); + return SDValue(); unsigned BytesLeft = SizeVal & 3; unsigned NumMemOps = SizeVal >> 2; @@ -1272,8 +1272,8 @@ ARMTargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, unsigned VTSize = 4; unsigned i = 0; const unsigned MAX_LOADS_IN_LDM = 6; - SDOperand TFOps[MAX_LOADS_IN_LDM]; - SDOperand Loads[MAX_LOADS_IN_LDM]; + SDValue TFOps[MAX_LOADS_IN_LDM]; + SDValue Loads[MAX_LOADS_IN_LDM]; uint64_t SrcOff = 0, DstOff = 0; // Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the @@ -1357,8 +1357,8 @@ static SDNode *ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) { assert(N->getValueType(0) == MVT::i64 && N->getOperand(0).getValueType() == MVT::f64); - SDOperand Op = N->getOperand(0); - SDOperand Cvt = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32), + SDValue Op = N->getOperand(0); + SDValue Cvt = DAG.getNode(ARMISD::FMRRD, DAG.getVTList(MVT::i32, MVT::i32), &Op, 1); // Merge the pieces into a single i64 value. @@ -1379,9 +1379,9 @@ static SDNode *ExpandSRx(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST) { if (ST->isThumb()) return 0; // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr. 
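Editor's aside (not part of the commit): EmitTargetCodeForMemcpy above inlines small, constant-size, 4-byte-aligned copies as batches of i32 loads and stores (up to six per TokenFactor, so they can later form LDM/STM), finishing the last 1-3 bytes with byte operations. The same strategy at the C++ level:

// Editor's aside -- illustrative sketch only, not part of this commit.
#include <cstdint>
#include <cstring>

static void inlineWordCopy(void *dst, const void *src, unsigned size) {
  auto *d = static_cast<unsigned char *>(dst);
  auto *s = static_cast<const unsigned char *>(src);
  unsigned words = size >> 2;        // NumMemOps in the real code
  unsigned tail  = size & 3;         // BytesLeft in the real code
  for (unsigned i = 0; i < words; ++i) {
    uint32_t w;
    std::memcpy(&w, s + 4 * i, 4);   // i32 load
    std::memcpy(d + 4 * i, &w, 4);   // i32 store
  }
  for (unsigned i = 0; i < tail; ++i)  // byte-sized tail
    d[4 * words + i] = s[4 * words + i];
}

int main() {
  char src[11] = "0123456789";
  char dst[11] = {};
  inlineWordCopy(dst, src, 11);
  return std::memcmp(dst, src, 11) != 0;  // 0 on success
}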
- SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0), + SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0), DAG.getConstant(0, MVT::i32)); - SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0), + SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, N->getOperand(0), DAG.getConstant(1, MVT::i32)); // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and @@ -1397,7 +1397,7 @@ static SDNode *ExpandSRx(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST) { } -SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { +SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Don't know how to custom lower this!"); abort(); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); @@ -1424,11 +1424,11 @@ SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { // FIXME: Remove these when LegalizeDAGTypes lands. - case ISD::BIT_CONVERT: return SDOperand(ExpandBIT_CONVERT(Op.Val, DAG), 0); + case ISD::BIT_CONVERT: return SDValue(ExpandBIT_CONVERT(Op.Val, DAG), 0); case ISD::SRL: - case ISD::SRA: return SDOperand(ExpandSRx(Op.Val, DAG,Subtarget),0); + case ISD::SRA: return SDValue(ExpandSRx(Op.Val, DAG,Subtarget),0); } - return SDOperand(); + return SDValue(); } @@ -1516,23 +1516,23 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, //===----------------------------------------------------------------------===// /// PerformFMRRDCombine - Target-specific dag combine xforms for ARMISD::FMRRD. -static SDOperand PerformFMRRDCombine(SDNode *N, +static SDValue PerformFMRRDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { // fmrrd(fmdrr x, y) -> x,y - SDOperand InDouble = N->getOperand(0); + SDValue InDouble = N->getOperand(0); if (InDouble.getOpcode() == ARMISD::FMDRR) return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); - return SDOperand(); + return SDValue(); } -SDOperand ARMTargetLowering::PerformDAGCombine(SDNode *N, +SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { switch (N->getOpcode()) { default: break; case ARMISD::FMRRD: return PerformFMRRDCombine(N, DCI); } - return SDOperand(); + return SDValue(); } @@ -1654,8 +1654,8 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM, static bool getIndexedAddressParts(SDNode *Ptr, MVT VT, - bool isSEXTLoad, SDOperand &Base, - SDOperand &Offset, bool &isInc, + bool isSEXTLoad, SDValue &Base, + SDValue &Offset, bool &isInc, SelectionDAG &DAG) { if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB) return false; @@ -1713,15 +1713,15 @@ static bool getIndexedAddressParts(SDNode *Ptr, MVT VT, /// offset pointer and addressing mode by reference if the node's address /// can be legally represented as pre-indexed load / store address. 
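Editor's aside (not part of the commit): ExpandSRx above lowers a 64-bit shift right by one into operations on the two i32 halves, funnelling the bit shifted out of the high half into the top of the low half (SRA_FLAG/SRL_FLAG plus RRX). The arithmetic it implements, checked directly:

// Editor's aside -- illustrative sketch only, not part of this commit.
// Signed >> is assumed arithmetic (guaranteed since C++20, true on
// mainstream compilers before that).
#include <cassert>
#include <cstdint>

static uint64_t srl64_by_1(uint32_t lo, uint32_t hi) {
  uint32_t newLo = (lo >> 1) | (hi << 31);      // RRX: bit from high half
  uint32_t newHi = hi >> 1;                     // logical shift (SRL)
  return (uint64_t(newHi) << 32) | newLo;
}

static uint64_t sra64_by_1(uint32_t lo, uint32_t hi) {
  uint32_t newLo = (lo >> 1) | (hi << 31);
  uint32_t newHi = uint32_t(int32_t(hi) >> 1);  // arithmetic shift (SRA)
  return (uint64_t(newHi) << 32) | newLo;
}

int main() {
  for (uint64_t v : {0ull, 1ull, 0x8000000000000001ull,
                     0xFFFFFFFFFFFFFFFFull, 0x123456789ABCDEF0ull}) {
    uint32_t lo = uint32_t(v), hi = uint32_t(v >> 32);
    assert(srl64_by_1(lo, hi) == v >> 1);
    assert(sra64_by_1(lo, hi) == uint64_t(int64_t(v) >> 1));
  }
  return 0;
}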
bool -ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, - SDOperand &Offset, +ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) { if (Subtarget->isThumb()) return false; MVT VT; - SDOperand Ptr; + SDValue Ptr; bool isSEXTLoad = false; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { Ptr = LD->getBasePtr(); @@ -1747,15 +1747,15 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, /// offset pointer and addressing mode by reference if this node can be /// combined with a load / store to form a post-indexed load / store. bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, - SDOperand &Base, - SDOperand &Offset, + SDValue &Base, + SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) { if (Subtarget->isThumb()) return false; MVT VT; - SDOperand Ptr; + SDValue Ptr; bool isSEXTLoad = false; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { VT = LD->getMemoryVT(); @@ -1775,7 +1775,7 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op, return false; } -void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, +void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 969d42b736..b717e0d73c 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -75,10 +75,10 @@ namespace llvm { public: explicit ARMTargetLowering(TargetMachine &TM); - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG); - virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; virtual const char *getTargetNodeName(unsigned Opcode) const; @@ -92,8 +92,8 @@ namespace llvm { /// getPreIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if the node's address /// can be legally represented as pre-indexed load / store address. - virtual bool getPreIndexedAddressParts(SDNode *N, SDOperand &Base, - SDOperand &Offset, + virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG); @@ -101,11 +101,11 @@ namespace llvm { /// offset pointer and addressing mode by reference if this node can be /// combined with a load / store to form a post-indexed load / store. 
virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, - SDOperand &Base, SDOperand &Offset, + SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG); - virtual void computeMaskedBitsForTargetNode(const SDOperand Op, + virtual void computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -132,22 +132,22 @@ namespace llvm { /// unsigned ARMPCLabelIndex; - SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalAddressDarwin(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalAddressELF(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, + SDValue LowerCALL(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA, SelectionDAG &DAG); - SDOperand LowerToTLSExecModels(GlobalAddressSDNode *GA, + SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA, SelectionDAG &DAG); - SDOperand LowerGLOBAL_OFFSET_TABLE(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerBR_JT(SDOperand Op, SelectionDAG &DAG); - - SDOperand EmitTargetCodeForMemcpy(SelectionDAG &DAG, - SDOperand Chain, - SDOperand Dst, SDOperand Src, - SDOperand Size, unsigned Align, + SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG); + SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG); + + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, + SDValue Chain, + SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, bool AlwaysInline, const Value *DstSV, uint64_t DstSVOff, const Value *SrcSV, uint64_t SrcSVOff); diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index 34487ff5ef..a5719e4f13 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -152,7 +152,7 @@ def so_imm_not : // sext_16_node predicate - True if the SDNode is sign-extended 16 or more bits. def sext_16_node : PatLeaf<(i32 GPR:$a), [{ - return CurDAG->ComputeNumSignBits(SDOperand(N,0)) >= 17; + return CurDAG->ComputeNumSignBits(SDValue(N,0)) >= 17; }]>; diff --git a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp index 01a6856f07..89dee2a4fa 100644 --- a/lib/Target/Alpha/AlphaISelDAGToDAG.cpp +++ b/lib/Target/Alpha/AlphaISelDAGToDAG.cpp @@ -65,7 +65,7 @@ namespace { /// that the bits 1-7 of LHS are already zero. If LHS is non-null, we are /// in checking mode. If LHS is null, we assume that the mask has already /// been validated before. 
- uint64_t get_zapImm(SDOperand LHS, uint64_t Constant) { + uint64_t get_zapImm(SDValue LHS, uint64_t Constant) { uint64_t BitsToCheck = 0; unsigned Result = 0; for (unsigned i = 0; i != 8; ++i) { @@ -132,15 +132,15 @@ namespace { return (x - y) == r; } - static bool isFPZ(SDOperand N) { + static bool isFPZ(SDValue N) { ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N); return (CN && (CN->getValueAPF().isZero())); } - static bool isFPZn(SDOperand N) { + static bool isFPZn(SDValue N) { ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N); return (CN && CN->getValueAPF().isNegZero()); } - static bool isFPZp(SDOperand N) { + static bool isFPZp(SDValue N) { ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N); return (CN && CN->getValueAPF().isPosZero()); } @@ -153,13 +153,13 @@ namespace { /// getI64Imm - Return a target constant with the specified value, of type /// i64. - inline SDOperand getI64Imm(int64_t Imm) { + inline SDValue getI64Imm(int64_t Imm) { return CurDAG->getTargetConstant(Imm, MVT::i64); } // Select - Convert the specified operand from a target-independent to a // target-specific node if it hasn't already been changed. - SDNode *Select(SDOperand Op); + SDNode *Select(SDValue Op); /// InstructionSelect - This callback is invoked by /// SelectionDAGISel when it has created a SelectionDAG for us to codegen. @@ -171,11 +171,11 @@ namespace { /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for /// inline asm expressions. - virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op, + virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, - std::vector<SDOperand> &OutOps, + std::vector<SDValue> &OutOps, SelectionDAG &DAG) { - SDOperand Op0; + SDValue Op0; switch (ConstraintCode) { default: return true; case 'm': // memory @@ -192,9 +192,9 @@ namespace { #include "AlphaGenDAGISel.inc" private: - SDOperand getGlobalBaseReg(); - SDOperand getGlobalRetAddr(); - void SelectCALL(SDOperand Op); + SDValue getGlobalBaseReg(); + SDValue getGlobalRetAddr(); + void SelectCALL(SDValue Op); }; } @@ -202,7 +202,7 @@ private: /// getGlobalBaseReg - Output the instructions required to put the /// GOT address into a register. /// -SDOperand AlphaDAGToDAGISel::getGlobalBaseReg() { +SDValue AlphaDAGToDAGISel::getGlobalBaseReg() { unsigned GP = 0; for(MachineRegisterInfo::livein_iterator ii = RegInfo->livein_begin(), ee = RegInfo->livein_end(); ii != ee; ++ii) @@ -217,7 +217,7 @@ SDOperand AlphaDAGToDAGISel::getGlobalBaseReg() { /// getRASaveReg - Grab the return address /// -SDOperand AlphaDAGToDAGISel::getGlobalRetAddr() { +SDValue AlphaDAGToDAGISel::getGlobalRetAddr() { unsigned RA = 0; for(MachineRegisterInfo::livein_iterator ii = RegInfo->livein_begin(), ee = RegInfo->livein_end(); ii != ee; ++ii) @@ -242,7 +242,7 @@ void AlphaDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) { // Select - Convert the specified operand from a target-independent to a // target-specific node if it hasn't already been changed. -SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { +SDNode *AlphaDAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.Val; if (N->isMachineOpcode()) { return NULL; // Already selected. 
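Editor's aside (not part of the commit): get_zapImm above asks whether a 64-bit AND mask consists only of 0x00 and 0xFF bytes, because such an AND can be selected as Alpha's single zapnot instruction (an 8-bit mask naming which bytes to keep). A simplified sketch of that check; the real routine also accepts masks whose stray bits are already known to be zero in the other operand.

// Editor's aside -- illustrative sketch only, not part of this commit.
// Returns the byte-keep mask for zapnot, or 0 if the constant does not
// qualify (0 is also returned for an all-zero mask, which is fine for a
// sketch since and-with-zero is folded away anyway).
#include <cassert>
#include <cstdint>

static unsigned zapNotMask(uint64_t c) {
  unsigned mask = 0;
  for (unsigned i = 0; i != 8; ++i) {
    uint8_t byte = uint8_t(c >> (8 * i));
    if (byte == 0xFF)
      mask |= 1u << i;   // byte is kept
    else if (byte != 0x00)
      return 0;          // mixed byte: not expressible as zapnot
  }
  return mask;
}

int main() {
  assert(zapNotMask(0x00000000FFFFFFFFull) == 0x0F);  // low 4 bytes kept
  assert(zapNotMask(0xFF000000000000FFull) == 0x81);  // bytes 0 and 7 kept
  assert(zapNotMask(0x0000000000000F00ull) == 0);     // 0x0F byte: no
  return 0;
}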
@@ -261,26 +261,26 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { getI64Imm(0)); } case ISD::GLOBAL_OFFSET_TABLE: { - SDOperand Result = getGlobalBaseReg(); + SDValue Result = getGlobalBaseReg(); ReplaceUses(Op, Result); return NULL; } case AlphaISD::GlobalRetAddr: { - SDOperand Result = getGlobalRetAddr(); + SDValue Result = getGlobalRetAddr(); ReplaceUses(Op, Result); return NULL; } case AlphaISD::DivCall: { - SDOperand Chain = CurDAG->getEntryNode(); - SDOperand N0 = Op.getOperand(0); - SDOperand N1 = Op.getOperand(1); - SDOperand N2 = Op.getOperand(2); + SDValue Chain = CurDAG->getEntryNode(); + SDValue N0 = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); + SDValue N2 = Op.getOperand(2); AddToISelQueue(N0); AddToISelQueue(N1); AddToISelQueue(N2); Chain = CurDAG->getCopyToReg(Chain, Alpha::R24, N1, - SDOperand(0,0)); + SDValue(0,0)); Chain = CurDAG->getCopyToReg(Chain, Alpha::R25, N2, Chain.getValue(1)); Chain = CurDAG->getCopyToReg(Chain, Alpha::R27, N0, @@ -289,12 +289,12 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { CurDAG->getTargetNode(Alpha::JSRs, MVT::Other, MVT::Flag, Chain, Chain.getValue(1)); Chain = CurDAG->getCopyFromReg(Chain, Alpha::R27, MVT::i64, - SDOperand(CNode, 1)); + SDValue(CNode, 1)); return CurDAG->SelectNodeTo(N, Alpha::BISr, MVT::i64, Chain, Chain); } case ISD::READCYCLECOUNTER: { - SDOperand Chain = N->getOperand(0); + SDValue Chain = N->getOperand(0); AddToISelQueue(Chain); //Select chain return CurDAG->getTargetNode(Alpha::RPCC, MVT::i64, MVT::Other, Chain); @@ -304,7 +304,7 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { uint64_t uval = cast<ConstantSDNode>(N)->getValue(); if (uval == 0) { - SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), + SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), Alpha::R31, MVT::i64); ReplaceUses(Op, Result); return NULL; @@ -321,11 +321,11 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { break; //(zext (LDAH (LDA))) //Else use the constant pool ConstantInt *C = ConstantInt::get(Type::Int64Ty, uval); - SDOperand CPI = CurDAG->getTargetConstantPool(C, MVT::i64); + SDValue CPI = CurDAG->getTargetConstantPool(C, MVT::i64); SDNode *Tmp = CurDAG->getTargetNode(Alpha::LDAHr, MVT::i64, CPI, getGlobalBaseReg()); return CurDAG->SelectNodeTo(N, Alpha::LDQr, MVT::i64, MVT::Other, - CPI, SDOperand(Tmp, 0), CurDAG->getEntryNode()); + CPI, SDValue(Tmp, 0), CurDAG->getEntryNode()); } case ISD::TargetConstantFP: { ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N); @@ -371,13 +371,13 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { case ISD::SETUO: Opc = Alpha::CMPTUN; break; }; - SDOperand tmp1 = N->getOperand(rev?1:0); - SDOperand tmp2 = N->getOperand(rev?0:1); + SDValue tmp1 = N->getOperand(rev?1:0); + SDValue tmp2 = N->getOperand(rev?0:1); AddToISelQueue(tmp1); AddToISelQueue(tmp2); SDNode *cmp = CurDAG->getTargetNode(Opc, MVT::f64, tmp1, tmp2); if (inv) - cmp = CurDAG->getTargetNode(Alpha::CMPTEQ, MVT::f64, SDOperand(cmp, 0), + cmp = CurDAG->getTargetNode(Alpha::CMPTEQ, MVT::f64, SDValue(cmp, 0), CurDAG->getRegister(Alpha::F31, MVT::f64)); switch(CC) { case ISD::SETUEQ: case ISD::SETULT: case ISD::SETULE: @@ -386,16 +386,16 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { SDNode* cmp2 = CurDAG->getTargetNode(Alpha::CMPTUN, MVT::f64, tmp1, tmp2); cmp = CurDAG->getTargetNode(Alpha::ADDT, MVT::f64, - SDOperand(cmp2, 0), SDOperand(cmp, 0)); + SDValue(cmp2, 0), SDValue(cmp, 0)); break; } default: break; } - SDNode* LD = CurDAG->getTargetNode(Alpha::FTOIT, MVT::i64, SDOperand(cmp, 0)); 
+ SDNode* LD = CurDAG->getTargetNode(Alpha::FTOIT, MVT::i64, SDValue(cmp, 0)); return CurDAG->getTargetNode(Alpha::CMPULT, MVT::i64, CurDAG->getRegister(Alpha::R31, MVT::i64), - SDOperand(LD,0)); + SDValue(LD,0)); } break; @@ -408,16 +408,16 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { // so that things like this can be caught in fall though code //move int to fp bool isDouble = N->getValueType(0) == MVT::f64; - SDOperand cond = N->getOperand(0); - SDOperand TV = N->getOperand(1); - SDOperand FV = N->getOperand(2); + SDValue cond = N->getOperand(0); + SDValue TV = N->getOperand(1); + SDValue FV = N->getOperand(2); AddToISelQueue(cond); AddToISelQueue(TV); AddToISelQueue(FV); SDNode* LD = CurDAG->getTargetNode(Alpha::ITOFT, MVT::f64, cond); return CurDAG->getTargetNode(isDouble?Alpha::FCMOVNET:Alpha::FCMOVNES, - MVT::f64, FV, TV, SDOperand(LD,0)); + MVT::f64, FV, TV, SDValue(LD,0)); } break; @@ -442,8 +442,8 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { if (get_zapImm(mask)) { AddToISelQueue(N->getOperand(0).getOperand(0)); - SDOperand Z = - SDOperand(CurDAG->getTargetNode(Alpha::ZAPNOTi, MVT::i64, + SDValue Z = + SDValue(CurDAG->getTargetNode(Alpha::ZAPNOTi, MVT::i64, N->getOperand(0).getOperand(0), getI64Imm(get_zapImm(mask))), 0); return CurDAG->getTargetNode(Alpha::SRLr, MVT::i64, Z, @@ -458,16 +458,16 @@ SDNode *AlphaDAGToDAGISel::Select(SDOperand Op) { return SelectCode(Op); } -void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { +void AlphaDAGToDAGISel::SelectCALL(SDValue Op) { //TODO: add flag stuff to prevent nondeturministic breakage! SDNode *N = Op.Val; - SDOperand Chain = N->getOperand(0); - SDOperand Addr = N->getOperand(1); - SDOperand InFlag(0,0); // Null incoming flag value. + SDValue Chain = N->getOperand(0); + SDValue Addr = N->getOperand(1); + SDValue InFlag(0,0); // Null incoming flag value. AddToISelQueue(Chain); - std::vector<SDOperand> CallOperands; + std::vector<SDValue> CallOperands; std::vector<MVT> TypeOperands; //grab the arguments @@ -494,10 +494,10 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { } else assert(0 && "Unknown operand"); - SDOperand Ops[] = { CallOperands[i], getI64Imm((i - 6) * 8), + SDValue Ops[] = { CallOperands[i], getI64Imm((i - 6) * 8), CurDAG->getCopyFromReg(Chain, Alpha::R30, MVT::i64), Chain }; - Chain = SDOperand(CurDAG->getTargetNode(Opc, MVT::Other, Ops, 4), 0); + Chain = SDValue(CurDAG->getTargetNode(Opc, MVT::Other, Ops, 4), 0); } for (int i = 0; i < std::min(6, count); ++i) { if (TypeOperands[i].isInteger()) { @@ -513,21 +513,21 @@ void AlphaDAGToDAGISel::SelectCALL(SDOperand Op) { // Finally, once everything is in registers to pass to the call, emit the // call itself. 
if (Addr.getOpcode() == AlphaISD::GPRelLo) { - SDOperand GOT = getGlobalBaseReg(); + SDValue GOT = getGlobalBaseReg(); Chain = CurDAG->getCopyToReg(Chain, Alpha::R29, GOT, InFlag); InFlag = Chain.getValue(1); - Chain = SDOperand(CurDAG->getTargetNode(Alpha::BSR, MVT::Other, MVT::Flag, + Chain = SDValue(CurDAG->getTargetNode(Alpha::BSR, MVT::Other, MVT::Flag, Addr.getOperand(0), Chain, InFlag), 0); } else { AddToISelQueue(Addr); Chain = CurDAG->getCopyToReg(Chain, Alpha::R27, Addr, InFlag); InFlag = Chain.getValue(1); - Chain = SDOperand(CurDAG->getTargetNode(Alpha::JSR, MVT::Other, MVT::Flag, + Chain = SDValue(CurDAG->getTargetNode(Alpha::JSR, MVT::Other, MVT::Flag, Chain, InFlag), 0); } InFlag = Chain.getValue(1); - std::vector<SDOperand> CallResults; + std::vector<SDValue> CallResults; switch (N->getValueType(0).getSimpleVT()) { default: assert(0 && "Unexpected ret value!"); diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp index 0f7fd80c57..5128646489 100644 --- a/lib/Target/Alpha/AlphaISelLowering.cpp +++ b/lib/Target/Alpha/AlphaISelLowering.cpp @@ -146,7 +146,7 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM) : TargetLowering(TM) computeRegisterProperties(); } -MVT AlphaTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT AlphaTargetLowering::getSetCCResultType(const SDValue &) const { return MVT::i64; } @@ -168,15 +168,15 @@ const char *AlphaTargetLowering::getTargetNodeName(unsigned Opcode) const { } } -static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); - SDOperand Zero = DAG.getConstant(0, PtrVT); + SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); + SDValue Zero = DAG.getConstant(0, PtrVT); - SDOperand Hi = DAG.getNode(AlphaISD::GPRelHi, MVT::i64, JTI, + SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, MVT::i64, JTI, DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i64)); - SDOperand Lo = DAG.getNode(AlphaISD::GPRelLo, MVT::i64, JTI, Hi); + SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, MVT::i64, JTI, Hi); return Lo; } @@ -199,13 +199,13 @@ static SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { // //#define GP $29 // //#define SP $30 -static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, +static SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsBase, int &VarArgsOffset) { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - std::vector<SDOperand> ArgValues; - SDOperand Root = Op.getOperand(0); + std::vector<SDValue> ArgValues; + SDValue Root = Op.getOperand(0); AddLiveIn(MF, Alpha::R29, &Alpha::GPRCRegClass); //GP AddLiveIn(MF, Alpha::R26, &Alpha::GPRCRegClass); //RA @@ -216,9 +216,9 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, Alpha::F16, Alpha::F17, Alpha::F18, Alpha::F19, Alpha::F20, Alpha::F21}; for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { - SDOperand argt; + SDValue argt; MVT ObjectVT = Op.getValue(ArgNo).getValueType(); - SDOperand ArgVal; + SDValue ArgVal; if (ArgNo < 6) { switch (ObjectVT.getSimpleVT()) { @@ -246,7 +246,7 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, // Create the SelectionDAG nodes corresponding to a load //from this parameter - SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64); + SDValue 
FIN = DAG.getFrameIndex(FI, MVT::i64); ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); } ArgValues.push_back(ArgVal); @@ -256,14 +256,14 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; if (isVarArg) { VarArgsOffset = (Op.Val->getNumValues()-1) * 8; - std::vector<SDOperand> LS; + std::vector<SDValue> LS; for (int i = 0; i < 6; ++i) { if (TargetRegisterInfo::isPhysicalRegister(args_int[i])) args_int[i] = AddLiveIn(MF, args_int[i], &Alpha::GPRCRegClass); - SDOperand argt = DAG.getCopyFromReg(Root, args_int[i], MVT::i64); + SDValue argt = DAG.getCopyFromReg(Root, args_int[i], MVT::i64); int FI = MFI->CreateFixedObject(8, -8 * (6 - i)); if (i == 0) VarArgsBase = FI; - SDOperand SDFI = DAG.getFrameIndex(FI, MVT::i64); + SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64); LS.push_back(DAG.getStore(Root, argt, SDFI, NULL, 0)); if (TargetRegisterInfo::isPhysicalRegister(args_float[i])) @@ -285,18 +285,18 @@ static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, ArgValues.size()); } -static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { - SDOperand Copy = DAG.getCopyToReg(Op.getOperand(0), Alpha::R26, +static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) { + SDValue Copy = DAG.getCopyToReg(Op.getOperand(0), Alpha::R26, DAG.getNode(AlphaISD::GlobalRetAddr, MVT::i64), - SDOperand()); + SDValue()); switch (Op.getNumOperands()) { default: assert(0 && "Do not know how to return this many arguments!"); abort(); case 1: break; - //return SDOperand(); // ret void is legal + //return SDValue(); // ret void is legal case 3: { MVT ArgVT = Op.getOperand(1).getValueType(); unsigned ArgReg; @@ -315,11 +315,11 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(AlphaISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1)); } -std::pair<SDOperand, SDOperand> -AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, +std::pair<SDValue, SDValue> +AlphaTargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, bool isVarArg, unsigned CallingConv, bool isTailCall, - SDOperand Callee, ArgListTy &Args, + SDValue Callee, ArgListTy &Args, SelectionDAG &DAG) { int NumBytes = 0; if (Args.size() > 6) @@ -327,7 +327,7 @@ AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, getPointerTy())); - std::vector<SDOperand> args_to_use; + std::vector<SDValue> args_to_use; for (unsigned i = 0, e = Args.size(); i != e; ++i) { switch (getValueType(Args[i].Ty).getSimpleVT()) { @@ -363,17 +363,17 @@ AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, RetVals.push_back(ActualRetTyVT); RetVals.push_back(MVT::Other); - std::vector<SDOperand> Ops; + std::vector<SDValue> Ops; Ops.push_back(Chain); Ops.push_back(Callee); Ops.insert(Ops.end(), args_to_use.begin(), args_to_use.end()); - SDOperand TheCall = DAG.getNode(AlphaISD::CALL, RetVals, &Ops[0], Ops.size()); + SDValue TheCall = DAG.getNode(AlphaISD::CALL, RetVals, &Ops[0], Ops.size()); Chain = TheCall.getValue(RetTyVT != MVT::isVoid); Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, getPointerTy()), DAG.getConstant(0, getPointerTy()), - SDOperand()); - SDOperand RetVal = TheCall; + SDValue()); + SDValue RetVal = TheCall; if (RetTyVT != ActualRetTyVT) { ISD::NodeType AssertKind = ISD::DELETED_NODE; @@ -392,29 +392,29 @@ AlphaTargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, return 
std::make_pair(RetVal, Chain); } -void AlphaTargetLowering::LowerVAARG(SDNode *N, SDOperand &Chain, - SDOperand &DataPtr, SelectionDAG &DAG) { +void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain, + SDValue &DataPtr, SelectionDAG &DAG) { Chain = N->getOperand(0); - SDOperand VAListP = N->getOperand(1); + SDValue VAListP = N->getOperand(1); const Value *VAListS = cast<SrcValueSDNode>(N->getOperand(2))->getValue(); - SDOperand Base = DAG.getLoad(MVT::i64, Chain, VAListP, VAListS, 0); - SDOperand Tmp = DAG.getNode(ISD::ADD, MVT::i64, VAListP, + SDValue Base = DAG.getLoad(MVT::i64, Chain, VAListP, VAListS, 0); + SDValue Tmp = DAG.getNode(ISD::ADD, MVT::i64, VAListP, DAG.getConstant(8, MVT::i64)); - SDOperand Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Base.getValue(1), + SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Base.getValue(1), Tmp, NULL, 0, MVT::i32); DataPtr = DAG.getNode(ISD::ADD, MVT::i64, Base, Offset); if (N->getValueType(0).isFloatingPoint()) { //if fp && Offset < 6*8, then subtract 6*8 from DataPtr - SDOperand FPDataPtr = DAG.getNode(ISD::SUB, MVT::i64, DataPtr, + SDValue FPDataPtr = DAG.getNode(ISD::SUB, MVT::i64, DataPtr, DAG.getConstant(8*6, MVT::i64)); - SDOperand CC = DAG.getSetCC(MVT::i64, Offset, + SDValue CC = DAG.getSetCC(MVT::i64, Offset, DAG.getConstant(8*6, MVT::i64), ISD::SETLT); DataPtr = DAG.getNode(ISD::SELECT, MVT::i64, CC, FPDataPtr, DataPtr); } - SDOperand NewOffset = DAG.getNode(ISD::ADD, MVT::i64, Offset, + SDValue NewOffset = DAG.getNode(ISD::ADD, MVT::i64, Offset, DAG.getConstant(8, MVT::i64)); Chain = DAG.getTruncStore(Offset.getValue(1), NewOffset, Tmp, NULL, 0, MVT::i32); @@ -422,7 +422,7 @@ void AlphaTargetLowering::LowerVAARG(SDNode *N, SDOperand &Chain, /// LowerOperation - Provide custom lowering hooks for some operations. 
/// -SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { +SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Wasn't expecting to be able to lower this!"); case ISD::FORMAL_ARGUMENTS: return LowerFORMAL_ARGUMENTS(Op, DAG, @@ -435,16 +435,16 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::SINT_TO_FP: { assert(Op.getOperand(0).getValueType() == MVT::i64 && "Unhandled SINT_TO_FP type in custom expander!"); - SDOperand LD; + SDValue LD; bool isDouble = Op.getValueType() == MVT::f64; LD = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); - SDOperand FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, + SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, isDouble?MVT::f64:MVT::f32, LD); return FP; } case ISD::FP_TO_SINT: { bool isDouble = Op.getOperand(0).getValueType() == MVT::f64; - SDOperand src = Op.getOperand(0); + SDValue src = Op.getOperand(0); if (!isDouble) //Promote src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, src); @@ -456,11 +456,11 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::ConstantPool: { ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); Constant *C = CP->getConstVal(); - SDOperand CPI = DAG.getTargetConstantPool(C, MVT::i64, CP->getAlignment()); + SDValue CPI = DAG.getTargetConstantPool(C, MVT::i64, CP->getAlignment()); - SDOperand Hi = DAG.getNode(AlphaISD::GPRelHi, MVT::i64, CPI, + SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, MVT::i64, CPI, DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i64)); - SDOperand Lo = DAG.getNode(AlphaISD::GPRelLo, MVT::i64, CPI, Hi); + SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, MVT::i64, CPI, Hi); return Lo; } case ISD::GlobalTLSAddress: @@ -468,13 +468,13 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::GlobalAddress: { GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); - SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i64, GSDN->getOffset()); + SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i64, GSDN->getOffset()); // if (!GV->hasWeakLinkage() && !GV->isDeclaration() && !GV->hasLinkOnceLinkage()) { if (GV->hasInternalLinkage()) { - SDOperand Hi = DAG.getNode(AlphaISD::GPRelHi, MVT::i64, GA, + SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, MVT::i64, GA, DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i64)); - SDOperand Lo = DAG.getNode(AlphaISD::GPRelLo, MVT::i64, GA, Hi); + SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, MVT::i64, GA, Hi); return Lo; } else return DAG.getNode(AlphaISD::RelLit, MVT::i64, GA, @@ -492,7 +492,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { //Expand only on constant case if (Op.getOperand(1).getOpcode() == ISD::Constant) { MVT VT = Op.Val->getValueType(0); - SDOperand Tmp1 = Op.Val->getOpcode() == ISD::UREM ? + SDValue Tmp1 = Op.Val->getOpcode() == ISD::UREM ? 
BuildUDIV(Op.Val, DAG, NULL) : BuildSDIV(Op.Val, DAG, NULL); Tmp1 = DAG.getNode(ISD::MUL, VT, Tmp1, Op.getOperand(1)); @@ -513,7 +513,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::UDIV: opstr = "__divqu"; break; case ISD::SDIV: opstr = "__divq"; break; } - SDOperand Tmp1 = Op.getOperand(0), + SDValue Tmp1 = Op.getOperand(0), Tmp2 = Op.getOperand(1), Addr = DAG.getExternalSymbol(opstr, MVT::i64); return DAG.getNode(AlphaISD::DivCall, MVT::i64, Addr, Tmp1, Tmp2); @@ -521,10 +521,10 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { break; case ISD::VAARG: { - SDOperand Chain, DataPtr; + SDValue Chain, DataPtr; LowerVAARG(Op.Val, Chain, DataPtr, DAG); - SDOperand Result; + SDValue Result; if (Op.getValueType() == MVT::i32) Result = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Chain, DataPtr, NULL, 0, MVT::i32); @@ -533,30 +533,30 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { return Result; } case ISD::VACOPY: { - SDOperand Chain = Op.getOperand(0); - SDOperand DestP = Op.getOperand(1); - SDOperand SrcP = Op.getOperand(2); + SDValue Chain = Op.getOperand(0); + SDValue DestP = Op.getOperand(1); + SDValue SrcP = Op.getOperand(2); const Value *DestS = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); const Value *SrcS = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); - SDOperand Val = DAG.getLoad(getPointerTy(), Chain, SrcP, SrcS, 0); - SDOperand Result = DAG.getStore(Val.getValue(1), Val, DestP, DestS, 0); - SDOperand NP = DAG.getNode(ISD::ADD, MVT::i64, SrcP, + SDValue Val = DAG.getLoad(getPointerTy(), Chain, SrcP, SrcS, 0); + SDValue Result = DAG.getStore(Val.getValue(1), Val, DestP, DestS, 0); + SDValue NP = DAG.getNode(ISD::ADD, MVT::i64, SrcP, DAG.getConstant(8, MVT::i64)); Val = DAG.getExtLoad(ISD::SEXTLOAD, MVT::i64, Result, NP, NULL,0, MVT::i32); - SDOperand NPD = DAG.getNode(ISD::ADD, MVT::i64, DestP, + SDValue NPD = DAG.getNode(ISD::ADD, MVT::i64, DestP, DAG.getConstant(8, MVT::i64)); return DAG.getTruncStore(Val.getValue(1), Val, NPD, NULL, 0, MVT::i32); } case ISD::VASTART: { - SDOperand Chain = Op.getOperand(0); - SDOperand VAListP = Op.getOperand(1); + SDValue Chain = Op.getOperand(0); + SDValue VAListP = Op.getOperand(1); const Value *VAListS = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); // vastart stores the address of the VarArgsBase and VarArgsOffset - SDOperand FR = DAG.getFrameIndex(VarArgsBase, MVT::i64); - SDOperand S1 = DAG.getStore(Chain, FR, VAListP, VAListS, 0); - SDOperand SA2 = DAG.getNode(ISD::ADD, MVT::i64, VAListP, + SDValue FR = DAG.getFrameIndex(VarArgsBase, MVT::i64); + SDValue S1 = DAG.getStore(Chain, FR, VAListP, VAListS, 0); + SDValue SA2 = DAG.getNode(ISD::ADD, MVT::i64, VAListP, DAG.getConstant(8, MVT::i64)); return DAG.getTruncStore(S1, DAG.getConstant(VarArgsOffset, MVT::i64), SA2, NULL, 0, MVT::i32); @@ -567,7 +567,7 @@ SDOperand AlphaTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::FRAMEADDR: break; } - return SDOperand(); + return SDValue(); } SDNode *AlphaTargetLowering::ReplaceNodeResults(SDNode *N, @@ -576,7 +576,7 @@ SDNode *AlphaTargetLowering::ReplaceNodeResults(SDNode *N, N->getOpcode() == ISD::VAARG && "Unknown node to custom promote!"); - SDOperand Chain, DataPtr; + SDValue Chain, DataPtr; LowerVAARG(N, Chain, DataPtr, DAG); return DAG.getLoad(N->getValueType(0), Chain, DataPtr, NULL, 0).Val; } diff --git a/lib/Target/Alpha/AlphaISelLowering.h b/lib/Target/Alpha/AlphaISelLowering.h index 
1440908c4f..770dde33c8 100644 --- a/lib/Target/Alpha/AlphaISelLowering.h +++ b/lib/Target/Alpha/AlphaISelLowering.h @@ -67,11 +67,11 @@ namespace llvm { explicit AlphaTargetLowering(TargetMachine &TM); /// getSetCCResultType - Get the SETCC result ValueType - virtual MVT getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDValue &) const; /// LowerOperation - Provide custom lowering hooks for some operations. /// - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG); // Friendly names for dumps @@ -79,9 +79,9 @@ namespace llvm { /// LowerCallTo - This hook lowers an abstract call to a function into an /// actual call. - virtual std::pair<SDOperand, SDOperand> - LowerCallTo(SDOperand Chain, const Type *RetTy, bool RetSExt, bool RetZExt, - bool isVarArg, unsigned CC, bool isTailCall, SDOperand Callee, + virtual std::pair<SDValue, SDValue> + LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, + bool isVarArg, unsigned CC, bool isTailCall, SDValue Callee, ArgListTy &Args, SelectionDAG &DAG); ConstraintType getConstraintType(const std::string &Constraint) const; @@ -97,7 +97,7 @@ namespace llvm { private: // Helpers for custom lowering. - void LowerVAARG(SDNode *N, SDOperand &Chain, SDOperand &DataPtr, + void LowerVAARG(SDNode *N, SDValue &Chain, SDValue &DataPtr, SelectionDAG &DAG); }; diff --git a/lib/Target/Alpha/AlphaInstrInfo.td b/lib/Target/Alpha/AlphaInstrInfo.td index 42bd8edaff..5078893acf 100644 --- a/lib/Target/Alpha/AlphaInstrInfo.td +++ b/lib/Target/Alpha/AlphaInstrInfo.td @@ -62,7 +62,7 @@ def LH16 : SDNodeXForm<imm, [{ //ldah part of constant (or more if too big) }]>; def iZAPX : SDNodeXForm<and, [{ // get imm to ZAPi ConstantSDNode *RHS = cast<ConstantSDNode>(N->getOperand(1)); - return getI64Imm(get_zapImm(SDOperand(), RHS->getValue())); + return getI64Imm(get_zapImm(SDValue(), RHS->getValue())); }]>; def nearP2X : SDNodeXForm<imm, [{ return getI64Imm(Log2_64(getNearPower2((uint64_t)N->getValue()))); diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index 0e935ac35d..84a1bd8542 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -151,7 +151,7 @@ namespace { } bool - isHighLow(const SDOperand &Op) + isHighLow(const SDValue &Op) { return (Op.getOpcode() == SPUISD::IndirectAddr && ((Op.getOperand(0).getOpcode() == SPUISD::Hi @@ -242,52 +242,52 @@ public: /// getI32Imm - Return a target constant with the specified value, of type /// i32. - inline SDOperand getI32Imm(uint32_t Imm) { + inline SDValue getI32Imm(uint32_t Imm) { return CurDAG->getTargetConstant(Imm, MVT::i32); } /// getI64Imm - Return a target constant with the specified value, of type /// i64. - inline SDOperand getI64Imm(uint64_t Imm) { + inline SDValue getI64Imm(uint64_t Imm) { return CurDAG->getTargetConstant(Imm, MVT::i64); } /// getSmallIPtrImm - Return a target constant of pointer type. - inline SDOperand getSmallIPtrImm(unsigned Imm) { + inline SDValue getSmallIPtrImm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, SPUtli.getPointerTy()); } /// Select - Convert the specified operand from a target-independent to a /// target-specific node if it hasn't already been changed. - SDNode *Select(SDOperand Op); + SDNode *Select(SDValue Op); //! 
Returns true if the address N is an A-form (local store) address - bool SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index); + bool SelectAFormAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index); //! D-form address predicate - bool SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index); + bool SelectDFormAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index); /// Alternate D-form address using i7 offset predicate - bool SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp, - SDOperand &Base); + bool SelectDForm2Addr(SDValue Op, SDValue N, SDValue &Disp, + SDValue &Base); /// D-form address selection workhorse - bool DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Disp, - SDOperand &Base, int minOffset, int maxOffset); + bool DFormAddressPredicate(SDValue Op, SDValue N, SDValue &Disp, + SDValue &Base, int minOffset, int maxOffset); //! Address predicate if N can be expressed as an indexed [r+r] operation. - bool SelectXFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index); + bool SelectXFormAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index); /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for /// inline asm expressions. - virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op, + virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, - std::vector<SDOperand> &OutOps, + std::vector<SDValue> &OutOps, SelectionDAG &DAG) { - SDOperand Op0, Op1; + SDValue Op0, Op1; switch (ConstraintCode) { default: return true; case 'm': // memory @@ -358,11 +358,11 @@ SPUDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) \arg Index The base address index */ bool -SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index) { +SPUDAGToDAGISel::SelectAFormAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index) { // These match the addr256k operand type: MVT OffsVT = MVT::i16; - SDOperand Zero = CurDAG->getTargetConstant(0, OffsVT); + SDValue Zero = CurDAG->getTargetConstant(0, OffsVT); switch (N.getOpcode()) { case ISD::Constant: @@ -384,7 +384,7 @@ SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, // Just load from memory if there's only a single use of the location, // otherwise, this will get handled below with D-form offset addresses if (N.hasOneUse()) { - SDOperand Op0 = N.getOperand(0); + SDValue Op0 = N.getOperand(0); switch (Op0.getOpcode()) { case ISD::TargetConstantPool: case ISD::TargetJumpTable: @@ -410,8 +410,8 @@ SPUDAGToDAGISel::SelectAFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, } bool -SPUDAGToDAGISel::SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp, - SDOperand &Base) { +SPUDAGToDAGISel::SelectDForm2Addr(SDValue Op, SDValue N, SDValue &Disp, + SDValue &Base) { const int minDForm2Offset = -(1 << 7); const int maxDForm2Offset = (1 << 7) - 1; return DFormAddressPredicate(Op, N, Disp, Base, minDForm2Offset, @@ -428,19 +428,19 @@ SPUDAGToDAGISel::SelectDForm2Addr(SDOperand Op, SDOperand N, SDOperand &Disp, displacement, [r+I10] (D-form address). \return true if \a N is a D-form address with \a Base and \a Index set - to non-empty SDOperand instances. + to non-empty SDValue instances. 
*/ bool -SPUDAGToDAGISel::SelectDFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index) { +SPUDAGToDAGISel::SelectDFormAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index) { return DFormAddressPredicate(Op, N, Base, Index, SPUFrameInfo::minFrameOffset(), SPUFrameInfo::maxFrameOffset()); } bool -SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index, int minOffset, +SPUDAGToDAGISel::DFormAddressPredicate(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index, int minOffset, int maxOffset) { unsigned Opc = N.getOpcode(); MVT PtrTy = SPUtli.getPointerTy(); @@ -458,8 +458,8 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas } } else if (Opc == ISD::ADD) { // Generated by getelementptr - const SDOperand Op0 = N.getOperand(0); - const SDOperand Op1 = N.getOperand(1); + const SDValue Op0 = N.getOperand(0); + const SDValue Op1 = N.getOperand(1); if ((Op0.getOpcode() == SPUISD::Hi && Op1.getOpcode() == SPUISD::Lo) || (Op1.getOpcode() == SPUISD::Hi && Op0.getOpcode() == SPUISD::Lo)) { @@ -511,8 +511,8 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas } } else if (Opc == SPUISD::IndirectAddr) { // Indirect with constant offset -> D-Form address - const SDOperand Op0 = N.getOperand(0); - const SDOperand Op1 = N.getOperand(1); + const SDValue Op0 = N.getOperand(0); + const SDValue Op1 = N.getOperand(1); if (Op0.getOpcode() == SPUISD::Hi && Op1.getOpcode() == SPUISD::Lo) { @@ -522,7 +522,7 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas return true; } else if (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1)) { int32_t offset = 0; - SDOperand idxOp; + SDValue idxOp; if (isa<ConstantSDNode>(Op1)) { ConstantSDNode *CN = cast<ConstantSDNode>(Op1); @@ -563,8 +563,8 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDOperand Op, SDOperand N, SDOperand &Bas address. */ bool -SPUDAGToDAGISel::SelectXFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index) { +SPUDAGToDAGISel::SelectXFormAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index) { if (SelectAFormAddr(Op, N, Base, Index) || SelectDFormAddr(Op, N, Base, Index)) return false; @@ -579,13 +579,13 @@ SPUDAGToDAGISel::SelectXFormAddr(SDOperand Op, SDOperand N, SDOperand &Base, /*! */ SDNode * -SPUDAGToDAGISel::Select(SDOperand Op) { +SPUDAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.Val; unsigned Opc = N->getOpcode(); int n_ops = -1; unsigned NewOpc; MVT OpVT = Op.getValueType(); - SDOperand Ops[8]; + SDValue Ops[8]; if (N->isMachineOpcode()) { return NULL; // Already selected. 
@@ -617,7 +617,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { } } else if (Opc == ISD::ZERO_EXTEND) { // (zero_extend:i16 (and:i8 <arg>, <const>)) - const SDOperand &Op1 = N->getOperand(0); + const SDValue &Op1 = N->getOperand(0); if (Op.getValueType() == MVT::i16 && Op1.getValueType() == MVT::i8) { if (Op1.getOpcode() == ISD::AND) { @@ -634,8 +634,8 @@ SPUDAGToDAGISel::Select(SDOperand Op) { } else if (Opc == SPUISD::LDRESULT) { // Custom select instructions for LDRESULT MVT VT = N->getValueType(0); - SDOperand Arg = N->getOperand(0); - SDOperand Chain = N->getOperand(1); + SDValue Arg = N->getOperand(0); + SDValue Chain = N->getOperand(1); SDNode *Result; const valtype_map_s *vtm = getValueTypeMapEntry(VT); @@ -649,7 +649,7 @@ SPUDAGToDAGISel::Select(SDOperand Op) { AddToISelQueue(Arg); Opc = vtm->ldresult_ins; if (vtm->ldresult_imm) { - SDOperand Zero = CurDAG->getTargetConstant(0, VT); + SDValue Zero = CurDAG->getTargetConstant(0, VT); AddToISelQueue(Zero); Result = CurDAG->getTargetNode(Opc, VT, MVT::Other, Arg, Zero, Chain); @@ -657,16 +657,16 @@ SPUDAGToDAGISel::Select(SDOperand Op) { Result = CurDAG->getTargetNode(Opc, MVT::Other, Arg, Arg, Chain); } - Chain = SDOperand(Result, 1); + Chain = SDValue(Result, 1); AddToISelQueue(Chain); return Result; } else if (Opc == SPUISD::IndirectAddr) { - SDOperand Op0 = Op.getOperand(0); + SDValue Op0 = Op.getOperand(0); if (Op0.getOpcode() == SPUISD::LDRESULT) { /* || Op0.getOpcode() == SPUISD::AFormAddr) */ // (IndirectAddr (LDRESULT, imm)) - SDOperand Op1 = Op.getOperand(1); + SDValue Op1 = Op.getOperand(1); MVT VT = Op.getValueType(); DEBUG(cerr << "CellSPU: IndirectAddr(LDRESULT, imm):\nOp0 = "); diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 607d0dcc04..2fd72511ba 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -85,7 +85,7 @@ namespace { address, external symbol, constant pool) or an A-form address. */ - bool isMemoryOperand(const SDOperand &Op) + bool isMemoryOperand(const SDValue &Op) { const unsigned Opc = Op.getOpcode(); return (Opc == ISD::GlobalAddress @@ -102,7 +102,7 @@ namespace { } //! Predicate that returns true if the operand is an indirect target - bool isIndirectOperand(const SDOperand &Op) + bool isIndirectOperand(const SDValue &Op) { const unsigned Opc = Op.getOpcode(); return (Opc == ISD::Register @@ -453,7 +453,7 @@ SPUTargetLowering::getTargetNodeName(unsigned Opcode) const return ((i != node_names.end()) ? i->second : 0); } -MVT SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const { +MVT SPUTargetLowering::getSetCCResultType(const SDValue &Op) const { MVT VT = Op.getValueType(); if (VT.isInteger()) return VT; @@ -491,19 +491,19 @@ MVT SPUTargetLowering::getSetCCResultType(const SDOperand &Op) const { Both load and store lowering load a block of data aligned on a 16-byte boundary. This is the common aligned load code shared between both. 
*/ -static SDOperand -AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST, +static SDValue +AlignedLoad(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST, LSBaseSDNode *LSN, unsigned &alignment, int &alignOffs, int &prefSlotOffs, MVT &VT, bool &was16aligned) { MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); const valtype_map_s *vtm = getValueTypeMapEntry(VT); - SDOperand basePtr = LSN->getBasePtr(); - SDOperand chain = LSN->getChain(); + SDValue basePtr = LSN->getBasePtr(); + SDValue chain = LSN->getChain(); if (basePtr.getOpcode() == ISD::ADD) { - SDOperand Op1 = basePtr.Val->getOperand(1); + SDValue Op1 = basePtr.Val->getOperand(1); if (Op1.getOpcode() == ISD::Constant || Op1.getOpcode() == ISD::TargetConstant) { const ConstantSDNode *CN = cast<ConstantSDNode>(basePtr.getOperand(1)); @@ -518,7 +518,7 @@ AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST, // Loading from memory, can we adjust alignment? if (basePtr.getOpcode() == SPUISD::AFormAddr) { - SDOperand APtr = basePtr.getOperand(0); + SDValue APtr = basePtr.getOperand(0); if (APtr.getOpcode() == ISD::TargetGlobalAddress) { GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(APtr); alignment = GSDN->getGlobal()->getAlignment(); @@ -574,21 +574,21 @@ AlignedLoad(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST, All CellSPU loads and stores are aligned to 16-byte boundaries, so for elements within a 16-byte block, we have to rotate to extract the requested element. */ -static SDOperand -LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { +static SDValue +LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { LoadSDNode *LN = cast<LoadSDNode>(Op); - SDOperand the_chain = LN->getChain(); + SDValue the_chain = LN->getChain(); MVT VT = LN->getMemoryVT(); MVT OpVT = Op.Val->getValueType(0); ISD::LoadExtType ExtType = LN->getExtensionType(); unsigned alignment = LN->getAlignment(); - SDOperand Ops[8]; + SDValue Ops[8]; switch (LN->getAddressingMode()) { case ISD::UNINDEXED: { int offset, rotamt; bool was16aligned; - SDOperand result = + SDValue result = AlignedLoad(Op, DAG, ST, LN,alignment, offset, rotamt, VT, was16aligned); if (result.Val == 0) @@ -652,7 +652,7 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { } SDVTList retvts = DAG.getVTList(OpVT, MVT::Other); - SDOperand retops[2] = { + SDValue retops[2] = { result, the_chain }; @@ -673,7 +673,7 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { /*NOTREACHED*/ } - return SDOperand(); + return SDValue(); } /// Custom lower stores for CellSPU @@ -682,10 +682,10 @@ LowerLOAD(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { within a 16-byte block, we have to generate a shuffle to insert the requested element into its place, then store the resulting block. */ -static SDOperand -LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { +static SDValue +LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { StoreSDNode *SN = cast<StoreSDNode>(Op); - SDOperand Value = SN->getValue(); + SDValue Value = SN->getValue(); MVT VT = Value.getValueType(); MVT StVT = (!SN->isTruncatingStore() ? 
VT : SN->getMemoryVT()); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); @@ -704,7 +704,7 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { stVecVT = MVT::getVectorVT(StVT, (128 / StVT.getSizeInBits())); vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); - SDOperand alignLoadVec = + SDValue alignLoadVec = AlignedLoad(Op, DAG, ST, SN, alignment, chunk_offset, slot_offset, VT, was16aligned); @@ -712,10 +712,10 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { return alignLoadVec; LoadSDNode *LN = cast<LoadSDNode>(alignLoadVec); - SDOperand basePtr = LN->getBasePtr(); - SDOperand the_chain = alignLoadVec.getValue(1); - SDOperand theValue = SN->getValue(); - SDOperand result; + SDValue basePtr = LN->getBasePtr(); + SDValue the_chain = alignLoadVec.getValue(1); + SDValue theValue = SN->getValue(); + SDValue result; if (StVT != VT && (theValue.getOpcode() == ISD::AssertZext @@ -727,9 +727,9 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { chunk_offset &= 0xf; - SDOperand insertEltOffs = DAG.getConstant(chunk_offset, PtrVT); - SDOperand insertEltPtr; - SDOperand insertEltOp; + SDValue insertEltOffs = DAG.getConstant(chunk_offset, PtrVT); + SDValue insertEltPtr; + SDValue insertEltOp; // If the base pointer is already a D-form address, then just create // a new D-form address with a slot offset and the orignal base pointer. @@ -772,73 +772,73 @@ LowerSTORE(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { /*NOTREACHED*/ } - return SDOperand(); + return SDValue(); } /// Generate the address of a constant pool entry. -static SDOperand -LowerConstantPool(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { +static SDValue +LowerConstantPool(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); Constant *C = CP->getConstVal(); - SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); - SDOperand Zero = DAG.getConstant(0, PtrVT); + SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); + SDValue Zero = DAG.getConstant(0, PtrVT); const TargetMachine &TM = DAG.getTarget(); if (TM.getRelocationModel() == Reloc::Static) { if (!ST->usingLargeMem()) { - // Just return the SDOperand with the constant pool address in it. + // Just return the SDValue with the constant pool address in it. 
return DAG.getNode(SPUISD::AFormAddr, PtrVT, CPI, Zero); } else { - SDOperand Hi = DAG.getNode(SPUISD::Hi, PtrVT, CPI, Zero); - SDOperand Lo = DAG.getNode(SPUISD::Lo, PtrVT, CPI, Zero); + SDValue Hi = DAG.getNode(SPUISD::Hi, PtrVT, CPI, Zero); + SDValue Lo = DAG.getNode(SPUISD::Lo, PtrVT, CPI, Zero); return DAG.getNode(SPUISD::IndirectAddr, PtrVT, Hi, Lo); } } assert(0 && "LowerConstantPool: Relocation model other than static not supported."); - return SDOperand(); + return SDValue(); } -static SDOperand -LowerJumpTable(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { +static SDValue +LowerJumpTable(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); - SDOperand Zero = DAG.getConstant(0, PtrVT); + SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); + SDValue Zero = DAG.getConstant(0, PtrVT); const TargetMachine &TM = DAG.getTarget(); if (TM.getRelocationModel() == Reloc::Static) { if (!ST->usingLargeMem()) { return DAG.getNode(SPUISD::AFormAddr, PtrVT, JTI, Zero); } else { - SDOperand Hi = DAG.getNode(SPUISD::Hi, PtrVT, JTI, Zero); - SDOperand Lo = DAG.getNode(SPUISD::Lo, PtrVT, JTI, Zero); + SDValue Hi = DAG.getNode(SPUISD::Hi, PtrVT, JTI, Zero); + SDValue Lo = DAG.getNode(SPUISD::Lo, PtrVT, JTI, Zero); return DAG.getNode(SPUISD::IndirectAddr, PtrVT, Hi, Lo); } } assert(0 && "LowerJumpTable: Relocation model other than static not supported."); - return SDOperand(); + return SDValue(); } -static SDOperand -LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { +static SDValue +LowerGlobalAddress(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { MVT PtrVT = Op.getValueType(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); - SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); + SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); const TargetMachine &TM = DAG.getTarget(); - SDOperand Zero = DAG.getConstant(0, PtrVT); + SDValue Zero = DAG.getConstant(0, PtrVT); if (TM.getRelocationModel() == Reloc::Static) { if (!ST->usingLargeMem()) { return DAG.getNode(SPUISD::AFormAddr, PtrVT, GA, Zero); } else { - SDOperand Hi = DAG.getNode(SPUISD::Hi, PtrVT, GA, Zero); - SDOperand Lo = DAG.getNode(SPUISD::Lo, PtrVT, GA, Zero); + SDValue Hi = DAG.getNode(SPUISD::Hi, PtrVT, GA, Zero); + SDValue Lo = DAG.getNode(SPUISD::Lo, PtrVT, GA, Zero); return DAG.getNode(SPUISD::IndirectAddr, PtrVT, Hi, Lo); } } else { @@ -848,7 +848,7 @@ LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { /*NOTREACHED*/ } - return SDOperand(); + return SDValue(); } //! Custom lower i64 integer constants @@ -856,13 +856,13 @@ LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { This code inserts all of the necessary juggling that needs to occur to load a 64-bit constant into a register. 
*/ -static SDOperand -LowerConstant(SDOperand Op, SelectionDAG &DAG) { +static SDValue +LowerConstant(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); ConstantSDNode *CN = cast<ConstantSDNode>(Op.Val); if (VT == MVT::i64) { - SDOperand T = DAG.getConstant(CN->getValue(), MVT::i64); + SDValue T = DAG.getConstant(CN->getValue(), MVT::i64); return DAG.getNode(SPUISD::EXTRACT_ELT0, VT, DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T)); } else { @@ -873,12 +873,12 @@ LowerConstant(SDOperand Op, SelectionDAG &DAG) { /*NOTREACHED*/ } - return SDOperand(); + return SDValue(); } //! Custom lower double precision floating point constants -static SDOperand -LowerConstantFP(SDOperand Op, SelectionDAG &DAG) { +static SDValue +LowerConstantFP(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); ConstantFPSDNode *FP = cast<ConstantFPSDNode>(Op.Val); @@ -891,14 +891,14 @@ LowerConstantFP(SDOperand Op, SelectionDAG &DAG) { LowerConstant(DAG.getConstant(dbits, MVT::i64), DAG)); } - return SDOperand(); + return SDValue(); } //! Lower MVT::i1, MVT::i8 brcond to a promoted type (MVT::i32, MVT::i16) -static SDOperand -LowerBRCOND(SDOperand Op, SelectionDAG &DAG) +static SDValue +LowerBRCOND(SDValue Op, SelectionDAG &DAG) { - SDOperand Cond = Op.getOperand(1); + SDValue Cond = Op.getOperand(1); MVT CondVT = Cond.getValueType(); MVT CondNVT; @@ -909,17 +909,17 @@ LowerBRCOND(SDOperand Op, SelectionDAG &DAG) DAG.getNode(ISD::ZERO_EXTEND, CondNVT, Op.getOperand(1)), Op.getOperand(2)); } else - return SDOperand(); // Unchanged + return SDValue(); // Unchanged } -static SDOperand -LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) +static SDValue +LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex) { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); - SmallVector<SDOperand, 8> ArgValues; - SDOperand Root = Op.getOperand(0); + SmallVector<SDValue, 8> ArgValues; + SDValue Root = Op.getOperand(0); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; const unsigned *ArgRegs = SPURegisterInfo::getArgRegs(); @@ -933,7 +933,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) // Add DAG nodes to load the arguments or copy them out of registers. for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { - SDOperand ArgVal; + SDValue ArgVal; bool needsLoad = false; MVT ObjectVT = Op.getValue(ArgNo).getValueType(); unsigned ObjSize = ObjectVT.getSizeInBits()/8; @@ -1026,7 +1026,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) // that we ran out of physical registers of the appropriate type if (needsLoad) { int FI = MFI->CreateFixedObject(ObjSize, ArgOffset); - SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); ArgOffset += StackSlotSize; } @@ -1039,19 +1039,19 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) if (isVarArg) { VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, ArgOffset); - SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); + SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); // If this function is vararg, store any remaining integer argument regs to // their spots on the stack so that they may be loaded by deferencing the // result of va_next. 
- SmallVector<SDOperand, 8> MemOps; + SmallVector<SDValue, 8> MemOps; for (; ArgRegIdx != NumArgRegs; ++ArgRegIdx) { unsigned VReg = RegInfo.createVirtualRegister(&SPU::GPRCRegClass); RegInfo.addLiveIn(ArgRegs[ArgRegIdx], VReg); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); - SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); + SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT); + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); + SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } if (!MemOps.empty()) @@ -1067,7 +1067,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, int &VarArgsFrameIndex) /// isLSAAddress - Return the immediate to use if the specified /// value is representable as a LSA address. -static SDNode *isLSAAddress(SDOperand Op, SelectionDAG &DAG) { +static SDNode *isLSAAddress(SDValue Op, SelectionDAG &DAG) { ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); if (!C) return 0; @@ -1080,14 +1080,14 @@ static SDNode *isLSAAddress(SDOperand Op, SelectionDAG &DAG) { } static -SDOperand -LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { - SDOperand Chain = Op.getOperand(0); +SDValue +LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { + SDValue Chain = Op.getOperand(0); #if 0 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0; #endif - SDOperand Callee = Op.getOperand(4); + SDValue Callee = Op.getOperand(4); unsigned NumOps = (Op.getNumOperands() - 5) / 2; unsigned StackSlotSize = SPUFrameInfo::stackSlotSize(); const unsigned *ArgRegs = SPURegisterInfo::getArgRegs(); @@ -1104,7 +1104,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // Set up a copy of the stack pointer for use loading and storing any // arguments that may not fit in the registers available for argument // passing. - SDOperand StackPtr = DAG.getRegister(SPU::R1, MVT::i32); + SDValue StackPtr = DAG.getRegister(SPU::R1, MVT::i32); // Figure out which arguments are going to go in registers, and which in // memory. @@ -1112,16 +1112,16 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { unsigned ArgRegIdx = 0; // Keep track of registers passing arguments - std::vector<std::pair<unsigned, SDOperand> > RegsToPass; + std::vector<std::pair<unsigned, SDValue> > RegsToPass; // And the arguments passed on the stack - SmallVector<SDOperand, 8> MemOpChains; + SmallVector<SDValue, 8> MemOpChains; for (unsigned i = 0; i != NumOps; ++i) { - SDOperand Arg = Op.getOperand(5+2*i); + SDValue Arg = Op.getOperand(5+2*i); // PtrOff will be used to store the current argument to the stack if a // register cannot be found for it. - SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); + SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, PtrOff); switch (Arg.getValueType().getSimpleVT()) { @@ -1171,14 +1171,14 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // Build a sequence of copy-to-reg nodes chained together with token chain // and flag operands which copy the outgoing args into the appropriate regs. 
- SDOperand InFlag; + SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, InFlag); InFlag = Chain.getValue(1); } - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; unsigned CallOpc = SPUISD::CALL; // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every @@ -1187,8 +1187,8 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { GlobalValue *GV = G->getGlobal(); MVT CalleeVT = Callee.getValueType(); - SDOperand Zero = DAG.getConstant(0, PtrVT); - SDOperand GA = DAG.getTargetGlobalAddress(GV, CalleeVT); + SDValue Zero = DAG.getConstant(0, PtrVT); + SDValue GA = DAG.getTargetGlobalAddress(GV, CalleeVT); if (!ST->usingLargeMem()) { // Turn calls to targets that are defined (i.e., have bodies) into BRSL @@ -1214,7 +1214,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { else if (SDNode *Dest = isLSAAddress(Callee, DAG)) { // If this is an absolute destination address that appears to be a legal // local store address, use the munged value. - Callee = SDOperand(Dest, 0); + Callee = SDValue(Dest, 0); } Ops.push_back(Chain); @@ -1240,7 +1240,7 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { if (Op.Val->getValueType(0) != MVT::Other) InFlag = Chain.getValue(1); - SDOperand ResultVals[3]; + SDValue ResultVals[3]; unsigned NumResults = 0; // If the call has results, copy the values out of the ret val registers. @@ -1291,12 +1291,12 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG, const SPUSubtarget *ST) { // Otherwise, merge everything together with a MERGE_VALUES node. ResultVals[NumResults++] = Chain; - SDOperand Res = DAG.getMergeValues(ResultVals, NumResults); + SDValue Res = DAG.getMergeValues(ResultVals, NumResults); return Res.getValue(Op.ResNo); } -static SDOperand -LowerRET(SDOperand Op, SelectionDAG &DAG, TargetMachine &TM) { +static SDValue +LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM) { SmallVector<CCValAssign, 16> RVLocs; unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg(); @@ -1310,8 +1310,8 @@ LowerRET(SDOperand Op, SelectionDAG &DAG, TargetMachine &TM) { DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } - SDOperand Chain = Op.getOperand(0); - SDOperand Flag; + SDValue Chain = Op.getOperand(0); + SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { @@ -1334,7 +1334,7 @@ LowerRET(SDOperand Op, SelectionDAG &DAG, TargetMachine &TM) { static ConstantSDNode * getVecImm(SDNode *N) { - SDOperand OpVal(0, 0); + SDValue OpVal(0, 0); // Check to see if this buildvec has a single non-undef value in its elements. 
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { @@ -1357,7 +1357,7 @@ getVecImm(SDNode *N) { /// get_vec_i18imm - Test if this vector is a vector filled with the same value /// and the value fits into an unsigned 18-bit constant, and if so, return the /// constant -SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG, +SDValue SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG, MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { uint64_t Value = CN->getValue(); @@ -1366,20 +1366,20 @@ SDOperand SPU::get_vec_u18imm(SDNode *N, SelectionDAG &DAG, uint32_t upper = uint32_t(UValue >> 32); uint32_t lower = uint32_t(UValue); if (upper != lower) - return SDOperand(); + return SDValue(); Value = Value >> 32; } if (Value <= 0x3ffff) return DAG.getConstant(Value, ValueType); } - return SDOperand(); + return SDValue(); } /// get_vec_i16imm - Test if this vector is a vector filled with the same value /// and the value fits into a signed 16-bit constant, and if so, return the /// constant -SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, +SDValue SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int64_t Value = CN->getSignExtended(); @@ -1388,7 +1388,7 @@ SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, uint32_t upper = uint32_t(UValue >> 32); uint32_t lower = uint32_t(UValue); if (upper != lower) - return SDOperand(); + return SDValue(); Value = Value >> 32; } if (Value >= -(1 << 15) && Value <= ((1 << 15) - 1)) { @@ -1396,13 +1396,13 @@ SDOperand SPU::get_vec_i16imm(SDNode *N, SelectionDAG &DAG, } } - return SDOperand(); + return SDValue(); } /// get_vec_i10imm - Test if this vector is a vector filled with the same value /// and the value fits into a signed 10-bit constant, and if so, return the /// constant -SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, +SDValue SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int64_t Value = CN->getSignExtended(); @@ -1411,14 +1411,14 @@ SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, uint32_t upper = uint32_t(UValue >> 32); uint32_t lower = uint32_t(UValue); if (upper != lower) - return SDOperand(); + return SDValue(); Value = Value >> 32; } if (isS10Constant(Value)) return DAG.getConstant(Value, ValueType); } - return SDOperand(); + return SDValue(); } /// get_vec_i8imm - Test if this vector is a vector filled with the same value @@ -1428,7 +1428,7 @@ SDOperand SPU::get_vec_i10imm(SDNode *N, SelectionDAG &DAG, /// @note: The incoming vector is v16i8 because that's the only way we can load /// constant vectors. Thus, we test to see if the upper and lower bytes are the /// same value. 
-SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG, +SDValue SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG, MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { int Value = (int) CN->getValue(); @@ -1441,13 +1441,13 @@ SDOperand SPU::get_vec_i8imm(SDNode *N, SelectionDAG &DAG, return DAG.getConstant(Value, ValueType); } - return SDOperand(); + return SDValue(); } /// get_ILHUvec_imm - Test if this vector is a vector filled with the same value /// and the value fits into a signed 16-bit constant, and if so, return the /// constant -SDOperand SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, +SDValue SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, MVT ValueType) { if (ConstantSDNode *CN = getVecImm(N)) { uint64_t Value = CN->getValue(); @@ -1457,25 +1457,25 @@ SDOperand SPU::get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, return DAG.getConstant(Value >> 16, ValueType); } - return SDOperand(); + return SDValue(); } /// get_v4i32_imm - Catch-all for general 32-bit constant vectors -SDOperand SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) { +SDValue SPU::get_v4i32_imm(SDNode *N, SelectionDAG &DAG) { if (ConstantSDNode *CN = getVecImm(N)) { return DAG.getConstant((unsigned) CN->getValue(), MVT::i32); } - return SDOperand(); + return SDValue(); } /// get_v4i32_imm - Catch-all for general 64-bit constant vectors -SDOperand SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) { +SDValue SPU::get_v2i64_imm(SDNode *N, SelectionDAG &DAG) { if (ConstantSDNode *CN = getVecImm(N)) { return DAG.getConstant((unsigned) CN->getValue(), MVT::i64); } - return SDOperand(); + return SDValue(); } // If this is a vector of constants or undefs, get the bits. A bit in @@ -1490,7 +1490,7 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { - SDOperand OpVal = BV->getOperand(i); + SDValue OpVal = BV->getOperand(i); unsigned PartNo = i >= e/2; // In the upper 128 bits? unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. @@ -1589,7 +1589,7 @@ static bool isConstantSplat(const uint64_t Bits128[2], // selects to a single instruction, return Op. Otherwise, if we can codegen // this case more efficiently than a constant pool load, lower it to the // sequence of ops that should be used. -static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); // If this is a vector of constants or undefs, get the bits. A bit in // UndefBits is set if the corresponding element of the vector is an @@ -1603,7 +1603,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { || !isConstantSplat(VectorBits, UndefBits, VT.getVectorElementType().getSizeInBits(), SplatBits, SplatUndef, SplatSize)) - return SDOperand(); // Not a constant vector, not a splat. + return SDValue(); // Not a constant vector, not a splat. switch (VT.getSimpleVT()) { default: @@ -1612,7 +1612,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { assert(SplatSize == 4 && "LowerBUILD_VECTOR: Unexpected floating point vector element."); // NOTE: pretend the constant is an integer. 
LLVM won't load FP constants - SDOperand T = DAG.getConstant(Value32, MVT::i32); + SDValue T = DAG.getConstant(Value32, MVT::i32); return DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, T, T, T, T)); break; @@ -1622,7 +1622,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { assert(SplatSize == 8 && "LowerBUILD_VECTOR: 64-bit float vector element: unexpected size."); // NOTE: pretend the constant is an integer. LLVM won't load FP constants - SDOperand T = DAG.getConstant(f64val, MVT::i64); + SDValue T = DAG.getConstant(f64val, MVT::i64); return DAG.getNode(ISD::BIT_CONVERT, MVT::v2f64, DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i64, T, T)); break; @@ -1630,7 +1630,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { case MVT::v16i8: { // 8-bit constants have to be expanded to 16-bits unsigned short Value16 = SplatBits | (SplatBits << 8); - SDOperand Ops[8]; + SDValue Ops[8]; for (int i = 0; i < 8; ++i) Ops[i] = DAG.getConstant(Value16, MVT::i16); return DAG.getNode(ISD::BIT_CONVERT, VT, @@ -1642,14 +1642,14 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { Value16 = (unsigned short) (SplatBits & 0xffff); else Value16 = (unsigned short) (SplatBits | (SplatBits << 8)); - SDOperand T = DAG.getConstant(Value16, VT.getVectorElementType()); - SDOperand Ops[8]; + SDValue T = DAG.getConstant(Value16, VT.getVectorElementType()); + SDValue Ops[8]; for (int i = 0; i < 8; ++i) Ops[i] = T; return DAG.getNode(ISD::BUILD_VECTOR, VT, Ops, 8); } case MVT::v4i32: { unsigned int Value = SplatBits; - SDOperand T = DAG.getConstant(Value, VT.getVectorElementType()); + SDValue T = DAG.getConstant(Value, VT.getVectorElementType()); return DAG.getNode(ISD::BUILD_VECTOR, VT, T, T, T, T); } case MVT::v2i64: { @@ -1659,13 +1659,13 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { if (upper == lower) { // Magic constant that can be matched by IL, ILA, et. al. - SDOperand Val = DAG.getTargetConstant(val, MVT::i64); + SDValue Val = DAG.getTargetConstant(val, MVT::i64); return DAG.getNode(ISD::BUILD_VECTOR, VT, Val, Val); } else { - SDOperand LO32; - SDOperand HI32; - SmallVector<SDOperand, 16> ShufBytes; - SDOperand Result; + SDValue LO32; + SDValue HI32; + SmallVector<SDValue, 16> ShufBytes; + SDValue Result; bool upper_special, lower_special; // NOTE: This code creates common-case shuffle masks that can be easily @@ -1678,7 +1678,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Create lower vector if not a special pattern if (!lower_special) { - SDOperand LO32C = DAG.getConstant(lower, MVT::i32); + SDValue LO32C = DAG.getConstant(lower, MVT::i32); LO32 = DAG.getNode(ISD::BIT_CONVERT, VT, DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, LO32C, LO32C, LO32C, LO32C)); @@ -1686,7 +1686,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Create upper vector if not a special pattern if (!upper_special) { - SDOperand HI32C = DAG.getConstant(upper, MVT::i32); + SDValue HI32C = DAG.getConstant(upper, MVT::i32); HI32 = DAG.getNode(ISD::BIT_CONVERT, VT, DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, HI32C, HI32C, HI32C, HI32C)); @@ -1701,7 +1701,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { if (lower_special && upper_special) { // Unhappy situation... 
both upper and lower are special, so punt with // a target constant: - SDOperand Zero = DAG.getConstant(0, MVT::i32); + SDValue Zero = DAG.getConstant(0, MVT::i32); HI32 = LO32 = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Zero, Zero, Zero, Zero); } @@ -1709,7 +1709,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { for (int i = 0; i < 4; ++i) { uint64_t val = 0; for (int j = 0; j < 4; ++j) { - SDOperand V; + SDValue V; bool process_upper, process_lower; val <<= 8; process_upper = (upper_special && (i & 1) == 0); @@ -1739,7 +1739,7 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { } } - return SDOperand(); + return SDValue(); } /// LowerVECTOR_SHUFFLE - Lower a vector shuffle (V1, V2, V3) to something on @@ -1755,10 +1755,10 @@ static SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { /// element move from V2 into V1. /// \note /// SPUISD::SHUFB is eventually selected as Cell's <i>shufb</i> instructions. -static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { - SDOperand V1 = Op.getOperand(0); - SDOperand V2 = Op.getOperand(1); - SDOperand PermMask = Op.getOperand(2); +static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + SDValue PermMask = Op.getOperand(2); if (V2.getOpcode() == ISD::UNDEF) V2 = V1; @@ -1806,10 +1806,10 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { unsigned VReg = RegInfo.createVirtualRegister(&SPU::R32CRegClass); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Initialize temporary register to 0 - SDOperand InitTempReg = + SDValue InitTempReg = DAG.getCopyToReg(DAG.getEntryNode(), VReg, DAG.getConstant(0, PtrVT)); // Copy register's contents as index in INSERT_MASK: - SDOperand ShufMaskOp = + SDValue ShufMaskOp = DAG.getNode(SPUISD::INSERT_MASK, V1.getValueType(), DAG.getTargetConstant(V2Elt, MVT::i32), DAG.getCopyFromReg(InitTempReg, VReg, PtrVT)); @@ -1819,7 +1819,7 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // Convert the SHUFFLE_VECTOR mask's input element units to the actual bytes. unsigned BytesPerElement = EltVT.getSizeInBits()/8; - SmallVector<SDOperand, 16> ResultMask; + SmallVector<SDValue, 16> ResultMask; for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { unsigned SrcElt; if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) @@ -1833,21 +1833,21 @@ static SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { } } - SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, + SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, &ResultMask[0], ResultMask.size()); return DAG.getNode(SPUISD::SHUFB, V1.getValueType(), V1, V2, VPermMask); } } -static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { - SDOperand Op0 = Op.getOperand(0); // Op0 = the scalar +static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { + SDValue Op0 = Op.getOperand(0); // Op0 = the scalar if (Op0.Val->getOpcode() == ISD::Constant) { // For a constant, build the appropriate constant vector, which will // eventually simplify to a vector register load. 
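LowerVECTOR_SHUFFLE above converts the SHUFFLE_VECTOR mask from element indices into the byte selectors that SPUISD::SHUFB (Cell's shufb) consumes: each source element index is scaled by the element size in bytes and expanded into BytesPerElement consecutive byte indices. The inner loop that does this is elided by the hunk context above, so the exact expression here is the natural reconstruction rather than a quote; expandShuffleMask is an illustrative name.

```cpp
#include <cstdio>
#include <vector>

// Illustrative only: expand per-element shuffle indices into the per-byte
// selectors a 16-byte permute (shufb-style) control word consumes.
std::vector<unsigned> expandShuffleMask(const std::vector<unsigned> &EltMask,
                                        unsigned BytesPerElement) {
  std::vector<unsigned> ByteMask;
  for (unsigned SrcElt : EltMask)
    for (unsigned j = 0; j != BytesPerElement; ++j)
      ByteMask.push_back(SrcElt * BytesPerElement + j);
  return ByteMask;
}

int main() {
  // v4i32 shuffle <4, 1, 2, 3>: element 0 comes from the second vector,
  // the rest from the first.  Elements are 4 bytes wide.
  for (unsigned b : expandShuffleMask({4, 1, 2, 3}, 4))
    std::printf("%u ", b);  // prints: 16 17 18 19 4 5 6 7 8 9 10 11 12 13 14 15
  std::printf("\n");
}
```

Byte indices 0-15 select from V1 and 16-31 from V2, which is why a single-element insertion from V2 shows up as one out-of-range run in the expanded mask.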
ConstantSDNode *CN = cast<ConstantSDNode>(Op0.Val); - SmallVector<SDOperand, 16> ConstVecValues; + SmallVector<SDValue, 16> ConstVecValues; MVT VT; size_t n_copies; @@ -1863,7 +1863,7 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { case MVT::v2f64: n_copies = 2; VT = MVT::f64; break; } - SDOperand CValue = DAG.getConstant(CN->getValue(), VT); + SDValue CValue = DAG.getConstant(CN->getValue(), VT); for (size_t j = 0; j < n_copies; ++j) ConstVecValues.push_back(CValue); @@ -1883,10 +1883,10 @@ static SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { } } - return SDOperand(); + return SDValue(); } -static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerVectorMUL(SDValue Op, SelectionDAG &DAG) { switch (Op.getValueType().getSimpleVT()) { default: cerr << "CellSPU: Unknown vector multiplication, got " @@ -1896,12 +1896,12 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { /*NOTREACHED*/ case MVT::v4i32: { - SDOperand rA = Op.getOperand(0); - SDOperand rB = Op.getOperand(1); - SDOperand HiProd1 = DAG.getNode(SPUISD::MPYH, MVT::v4i32, rA, rB); - SDOperand HiProd2 = DAG.getNode(SPUISD::MPYH, MVT::v4i32, rB, rA); - SDOperand LoProd = DAG.getNode(SPUISD::MPYU, MVT::v4i32, rA, rB); - SDOperand Residual1 = DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd1); + SDValue rA = Op.getOperand(0); + SDValue rB = Op.getOperand(1); + SDValue HiProd1 = DAG.getNode(SPUISD::MPYH, MVT::v4i32, rA, rB); + SDValue HiProd2 = DAG.getNode(SPUISD::MPYH, MVT::v4i32, rB, rA); + SDValue LoProd = DAG.getNode(SPUISD::MPYU, MVT::v4i32, rA, rB); + SDValue Residual1 = DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd1); return DAG.getNode(ISD::ADD, MVT::v4i32, Residual1, HiProd2); break; @@ -1918,22 +1918,22 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { case MVT::v8i16: { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); - SDOperand Chain = Op.getOperand(0); - SDOperand rA = Op.getOperand(0); - SDOperand rB = Op.getOperand(1); + SDValue Chain = Op.getOperand(0); + SDValue rA = Op.getOperand(0); + SDValue rB = Op.getOperand(1); unsigned FSMBIreg = RegInfo.createVirtualRegister(&SPU::VECREGRegClass); unsigned HiProdReg = RegInfo.createVirtualRegister(&SPU::VECREGRegClass); - SDOperand FSMBOp = + SDValue FSMBOp = DAG.getCopyToReg(Chain, FSMBIreg, DAG.getNode(SPUISD::SELECT_MASK, MVT::v8i16, DAG.getConstant(0xcccc, MVT::i16))); - SDOperand HHProd = + SDValue HHProd = DAG.getCopyToReg(FSMBOp, HiProdReg, DAG.getNode(SPUISD::MPYHH, MVT::v8i16, rA, rB)); - SDOperand HHProd_v4i32 = + SDValue HHProd_v4i32 = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, DAG.getCopyFromReg(HHProd, HiProdReg, MVT::v4i32)); @@ -1952,68 +1952,68 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { // is to break it all apart, sign extend, and reassemble the various // intermediate products. 
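Before the v16i8 expansion that follows, it helps to spell out the identity the v4i32 case above leans on: SPUISD::MPYU multiplies the low 16-bit halves, and each SPUISD::MPYH contributes one cross product already shifted into the upper halfword (assuming the usual Cell semantics, where mpyh multiplies the high halfword of its first operand by the low halfword of its second and shifts left 16). Adding the three recovers the low 32 bits of the full product; the aHi*bHi term falls entirely outside 32 bits and is dropped. A scalar sketch:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: the scalar identity behind MPYU + MPYH(a,b) + MPYH(b,a).
uint32_t mul32_from_16bit_parts(uint32_t a, uint32_t b) {
  uint32_t aLo = a & 0xffff, aHi = a >> 16;
  uint32_t bLo = b & 0xffff, bHi = b >> 16;
  uint32_t LoProd  = aLo * bLo;          // MPYU:      low * low, full 32 bits
  uint32_t HiProd1 = (aHi * bLo) << 16;  // MPYH(a,b): high(a) * low(b), << 16
  uint32_t HiProd2 = (bHi * aLo) << 16;  // MPYH(b,a): high(b) * low(a), << 16
  return LoProd + HiProd1 + HiProd2;     // aHi*bHi*2^32 wraps away mod 2^32
}

int main() {
  assert(mul32_from_16bit_parts(123456789u, 987654321u) ==
         123456789u * 987654321u);
}
```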
case MVT::v16i8: { - SDOperand rA = Op.getOperand(0); - SDOperand rB = Op.getOperand(1); - SDOperand c8 = DAG.getConstant(8, MVT::i32); - SDOperand c16 = DAG.getConstant(16, MVT::i32); + SDValue rA = Op.getOperand(0); + SDValue rB = Op.getOperand(1); + SDValue c8 = DAG.getConstant(8, MVT::i32); + SDValue c16 = DAG.getConstant(16, MVT::i32); - SDOperand LLProd = + SDValue LLProd = DAG.getNode(SPUISD::MPY, MVT::v8i16, DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rA), DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rB)); - SDOperand rALH = DAG.getNode(SPUISD::VEC_SRA, MVT::v8i16, rA, c8); + SDValue rALH = DAG.getNode(SPUISD::VEC_SRA, MVT::v8i16, rA, c8); - SDOperand rBLH = DAG.getNode(SPUISD::VEC_SRA, MVT::v8i16, rB, c8); + SDValue rBLH = DAG.getNode(SPUISD::VEC_SRA, MVT::v8i16, rB, c8); - SDOperand LHProd = + SDValue LHProd = DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, DAG.getNode(SPUISD::MPY, MVT::v8i16, rALH, rBLH), c8); - SDOperand FSMBmask = DAG.getNode(SPUISD::SELECT_MASK, MVT::v8i16, + SDValue FSMBmask = DAG.getNode(SPUISD::SELECT_MASK, MVT::v8i16, DAG.getConstant(0x2222, MVT::i16)); - SDOperand LoProdParts = + SDValue LoProdParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, DAG.getNode(SPUISD::SELB, MVT::v8i16, LLProd, LHProd, FSMBmask)); - SDOperand LoProdMask = DAG.getConstant(0xffff, MVT::i32); + SDValue LoProdMask = DAG.getConstant(0xffff, MVT::i32); - SDOperand LoProd = + SDValue LoProd = DAG.getNode(ISD::AND, MVT::v4i32, LoProdParts, DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, LoProdMask, LoProdMask, LoProdMask, LoProdMask)); - SDOperand rAH = + SDValue rAH = DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rA), c16); - SDOperand rBH = + SDValue rBH = DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, rB), c16); - SDOperand HLProd = + SDValue HLProd = DAG.getNode(SPUISD::MPY, MVT::v8i16, DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rAH), DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, rBH)); - SDOperand HHProd_1 = + SDValue HHProd_1 = DAG.getNode(SPUISD::MPY, MVT::v8i16, DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rAH, c8)), DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, DAG.getNode(SPUISD::VEC_SRA, MVT::v4i32, rBH, c8))); - SDOperand HHProd = + SDValue HHProd = DAG.getNode(SPUISD::SELB, MVT::v8i16, HLProd, DAG.getNode(SPUISD::VEC_SHL, MVT::v8i16, HHProd_1, c8), FSMBmask); - SDOperand HiProd = + SDValue HiProd = DAG.getNode(SPUISD::VEC_SHL, MVT::v4i32, HHProd, c16); return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, @@ -2022,15 +2022,15 @@ static SDOperand LowerVectorMUL(SDOperand Op, SelectionDAG &DAG) { } } - return SDOperand(); + return SDValue(); } -static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerFDIVf32(SDValue Op, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); - SDOperand A = Op.getOperand(0); - SDOperand B = Op.getOperand(1); + SDValue A = Op.getOperand(0); + SDValue B = Op.getOperand(1); MVT VT = Op.getValueType(); unsigned VRegBR, VRegC; @@ -2047,13 +2047,13 @@ static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) { // Computes BRcpl = // (Floating Interpolate (FP Reciprocal Estimate B)) - SDOperand BRcpl = + SDValue BRcpl = DAG.getCopyToReg(DAG.getEntryNode(), VRegBR, DAG.getNode(SPUISD::FPInterp, VT, B, DAG.getNode(SPUISD::FPRecipEst, VT, B))); // Computes A * BRcpl and stores in a temporary register - SDOperand AxBRcpl = + SDValue AxBRcpl = DAG.getCopyToReg(BRcpl, VRegC, 
DAG.getNode(ISD::FMUL, VT, A, DAG.getCopyFromReg(BRcpl, VRegBR, VT))); @@ -2069,11 +2069,11 @@ static SDOperand LowerFDIVf32(SDOperand Op, SelectionDAG &DAG) { DAG.getCopyFromReg(AxBRcpl, VRegC, VT))))); } -static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); - SDOperand N = Op.getOperand(0); - SDOperand Elt = Op.getOperand(1); - SDOperand ShufMask[16]; + SDValue N = Op.getOperand(0); + SDValue Elt = Op.getOperand(1); + SDValue ShufMask[16]; ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt); assert(C != 0 && "LowerEXTRACT_VECTOR_ELT expecting constant SDNode"); @@ -2139,7 +2139,7 @@ static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { ShufMask[i] = ShufMask[i % (prefslot_end + 1)]; } - SDOperand ShufMaskVec = + SDValue ShufMaskVec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, &ShufMask[0], sizeof(ShufMask) / sizeof(ShufMask[0])); @@ -2150,10 +2150,10 @@ static SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { } -static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { - SDOperand VecOp = Op.getOperand(0); - SDOperand ValOp = Op.getOperand(1); - SDOperand IdxOp = Op.getOperand(2); +static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { + SDValue VecOp = Op.getOperand(0); + SDValue ValOp = Op.getOperand(1); + SDValue IdxOp = Op.getOperand(2); MVT VT = Op.getValueType(); ConstantSDNode *CN = cast<ConstantSDNode>(IdxOp); @@ -2161,9 +2161,9 @@ static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Use $2 because it's always 16-byte aligned and it's available: - SDOperand PtrBase = DAG.getRegister(SPU::R2, PtrVT); + SDValue PtrBase = DAG.getRegister(SPU::R2, PtrVT); - SDOperand result = + SDValue result = DAG.getNode(SPUISD::SHUFB, VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, ValOp), VecOp, @@ -2176,9 +2176,9 @@ static SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { return result; } -static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) +static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc) { - SDOperand N0 = Op.getOperand(0); // Everything has at least one operand + SDValue N0 = Op.getOperand(0); // Everything has at least one operand assert(Op.getValueType() == MVT::i8); switch (Opc) { @@ -2189,7 +2189,7 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::SUB: { // 8-bit subtraction: Promote the arguments up to 16-bits and truncate // the result: - SDOperand N1 = Op.getOperand(1); + SDValue N1 = Op.getOperand(1); N0 = (N0.getOpcode() != ISD::Constant ? DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0) : DAG.getConstant(cast<ConstantSDNode>(N0)->getValue(), MVT::i16)); @@ -2201,7 +2201,7 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) } case ISD::ROTR: case ISD::ROTL: { - SDOperand N1 = Op.getOperand(1); + SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant ? DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, N0) @@ -2210,7 +2210,7 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) N1 = (N1.getOpcode() != ISD::Constant ? 
DAG.getNode(N1Opc, MVT::i16, N1) : DAG.getConstant(cast<ConstantSDNode>(N1)->getValue(), MVT::i16)); - SDOperand ExpandArg = + SDValue ExpandArg = DAG.getNode(ISD::OR, MVT::i16, N0, DAG.getNode(ISD::SHL, MVT::i16, N0, DAG.getConstant(8, MVT::i16))); @@ -2219,7 +2219,7 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) } case ISD::SRL: case ISD::SHL: { - SDOperand N1 = Op.getOperand(1); + SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant ? DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, N0) @@ -2232,7 +2232,7 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) DAG.getNode(Opc, MVT::i16, N0, N1)); } case ISD::SRA: { - SDOperand N1 = Op.getOperand(1); + SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant ? DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0) @@ -2245,7 +2245,7 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) DAG.getNode(Opc, MVT::i16, N0, N1)); } case ISD::MUL: { - SDOperand N1 = Op.getOperand(1); + SDValue N1 = Op.getOperand(1); unsigned N1Opc; N0 = (N0.getOpcode() != ISD::Constant ? DAG.getNode(ISD::SIGN_EXTEND, MVT::i16, N0) @@ -2260,15 +2260,15 @@ static SDOperand LowerI8Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) } } - return SDOperand(); + return SDValue(); } -static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) +static SDValue LowerI64Math(SDValue Op, SelectionDAG &DAG, unsigned Opc) { MVT VT = Op.getValueType(); MVT VecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); - SDOperand Op0 = Op.getOperand(0); + SDValue Op0 = Op.getOperand(0); switch (Opc) { case ISD::ZERO_EXTEND: @@ -2284,7 +2284,7 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) unsigned NewOpc = (Opc == ISD::SIGN_EXTEND ? SPUISD::ROTBYTES_RIGHT_S : SPUISD::ROTQUAD_RZ_BYTES); - SDOperand PromoteScalar = + SDValue PromoteScalar = DAG.getNode(SPUISD::PROMOTE_SCALAR, Op0VecVT, Op0); return DAG.getNode(SPUISD::EXTRACT_ELT0, VT, @@ -2297,11 +2297,11 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::ADD: { // Turn operands into vectors to satisfy type checking (shufb works on // vectors) - SDOperand Op0 = + SDValue Op0 = DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0)); - SDOperand Op1 = + SDValue Op1 = DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(1)); - SmallVector<SDOperand, 16> ShufBytes; + SmallVector<SDValue, 16> ShufBytes; // Create the shuffle mask for "rotating" the borrow up one register slot // once the borrow is generated. 
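The i64 ADD lowering that starts above (and its SUB twin below) performs 64-bit arithmetic with 32-bit vector adds: SPUISD::CARRY_GENERATE produces a per-word carry, and the SHUFB whose mask is assembled from ShufBytes rotates that carry out of the low word's slot and into the high word's slot (the 0x80808080-style selectors fill the remaining lanes with zero), where a second add folds it in. The scalar analogue, as a sketch:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: a 64-bit add out of 32-bit adds plus an explicit carry,
// mirroring the carry-generate / rotate-carry-up / add sequence above.
uint64_t add64_via_32(uint32_t aLo, uint32_t aHi, uint32_t bLo, uint32_t bHi) {
  uint32_t lo    = aLo + bLo;
  uint32_t carry = (lo < aLo) ? 1u : 0u;   // CARRY_GENERATE for the low word
  uint32_t hi    = aHi + bHi + carry;      // carry rotated up into the high word
  return (uint64_t(hi) << 32) | lo;
}

int main() {
  uint64_t a = 0x00000001ffffffffULL, b = 0x0000000000000001ULL;
  assert(add64_via_32(uint32_t(a), uint32_t(a >> 32),
                      uint32_t(b), uint32_t(b >> 32)) == a + b);
}
```

The SUB path is the same picture with BORROW_GENERATE and a subtract-with-borrow in the high word.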
@@ -2310,9 +2310,9 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32)); ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32)); - SDOperand CarryGen = + SDValue CarryGen = DAG.getNode(SPUISD::CARRY_GENERATE, MVT::v2i64, Op0, Op1); - SDOperand ShiftedCarry = + SDValue ShiftedCarry = DAG.getNode(SPUISD::SHUFB, MVT::v2i64, CarryGen, CarryGen, DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, @@ -2326,11 +2326,11 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::SUB: { // Turn operands into vectors to satisfy type checking (shufb works on // vectors) - SDOperand Op0 = + SDValue Op0 = DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0)); - SDOperand Op1 = + SDValue Op1 = DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(1)); - SmallVector<SDOperand, 16> ShufBytes; + SmallVector<SDValue, 16> ShufBytes; // Create the shuffle mask for "rotating" the borrow up one register slot // once the borrow is generated. @@ -2339,9 +2339,9 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32)); ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32)); - SDOperand BorrowGen = + SDValue BorrowGen = DAG.getNode(SPUISD::BORROW_GENERATE, MVT::v2i64, Op0, Op1); - SDOperand ShiftedBorrow = + SDValue ShiftedBorrow = DAG.getNode(SPUISD::SHUFB, MVT::v2i64, BorrowGen, BorrowGen, DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, @@ -2353,20 +2353,20 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) } case ISD::SHL: { - SDOperand ShiftAmt = Op.getOperand(1); + SDValue ShiftAmt = Op.getOperand(1); MVT ShiftAmtVT = ShiftAmt.getValueType(); - SDOperand Op0Vec = DAG.getNode(SPUISD::PROMOTE_SCALAR, VecVT, Op0); - SDOperand MaskLower = + SDValue Op0Vec = DAG.getNode(SPUISD::PROMOTE_SCALAR, VecVT, Op0); + SDValue MaskLower = DAG.getNode(SPUISD::SELB, VecVT, Op0Vec, DAG.getConstant(0, VecVT), DAG.getNode(SPUISD::SELECT_MASK, VecVT, DAG.getConstant(0xff00ULL, MVT::i16))); - SDOperand ShiftAmtBytes = + SDValue ShiftAmtBytes = DAG.getNode(ISD::SRL, ShiftAmtVT, ShiftAmt, DAG.getConstant(3, ShiftAmtVT)); - SDOperand ShiftAmtBits = + SDValue ShiftAmtBits = DAG.getNode(ISD::AND, ShiftAmtVT, ShiftAmt, DAG.getConstant(7, ShiftAmtVT)); @@ -2380,13 +2380,13 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::SRL: { MVT VT = Op.getValueType(); - SDOperand ShiftAmt = Op.getOperand(1); + SDValue ShiftAmt = Op.getOperand(1); MVT ShiftAmtVT = ShiftAmt.getValueType(); - SDOperand ShiftAmtBytes = + SDValue ShiftAmtBytes = DAG.getNode(ISD::SRL, ShiftAmtVT, ShiftAmt, DAG.getConstant(3, ShiftAmtVT)); - SDOperand ShiftAmtBits = + SDValue ShiftAmtBits = DAG.getNode(ISD::AND, ShiftAmtVT, ShiftAmt, DAG.getConstant(7, ShiftAmtVT)); @@ -2399,9 +2399,9 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) case ISD::SRA: { // Promote Op0 to vector - SDOperand Op0 = + SDValue Op0 = DAG.getNode(SPUISD::PROMOTE_SCALAR, MVT::v2i64, Op.getOperand(0)); - SDOperand ShiftAmt = Op.getOperand(1); + SDValue ShiftAmt = Op.getOperand(1); MVT ShiftVT = ShiftAmt.getValueType(); // Negate variable shift amounts @@ -2410,23 +2410,23 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) DAG.getConstant(0, ShiftVT), ShiftAmt); } - SDOperand UpperHalfSign = + SDValue UpperHalfSign = DAG.getNode(SPUISD::EXTRACT_ELT0, MVT::i32, 
DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, DAG.getNode(SPUISD::VEC_SRA, MVT::v2i64, Op0, DAG.getConstant(31, MVT::i32)))); - SDOperand UpperHalfSignMask = + SDValue UpperHalfSignMask = DAG.getNode(SPUISD::SELECT_MASK, MVT::v2i64, UpperHalfSign); - SDOperand UpperLowerMask = + SDValue UpperLowerMask = DAG.getNode(SPUISD::SELECT_MASK, MVT::v2i64, DAG.getConstant(0xff00, MVT::i16)); - SDOperand UpperLowerSelect = + SDValue UpperLowerSelect = DAG.getNode(SPUISD::SELB, MVT::v2i64, UpperHalfSignMask, Op0, UpperLowerMask); - SDOperand RotateLeftBytes = + SDValue RotateLeftBytes = DAG.getNode(SPUISD::ROTBYTES_LEFT_BITS, MVT::v2i64, UpperLowerSelect, ShiftAmt); - SDOperand RotateLeftBits = + SDValue RotateLeftBits = DAG.getNode(SPUISD::ROTBYTES_LEFT, MVT::v2i64, RotateLeftBytes, ShiftAmt); @@ -2435,14 +2435,14 @@ static SDOperand LowerI64Math(SDOperand Op, SelectionDAG &DAG, unsigned Opc) } } - return SDOperand(); + return SDValue(); } //! Lower byte immediate operations for v16i8 vectors: -static SDOperand -LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { - SDOperand ConstVec; - SDOperand Arg; +static SDValue +LowerByteImmed(SDValue Op, SelectionDAG &DAG) { + SDValue ConstVec; + SDValue Arg; MVT VT = Op.getValueType(); ConstVec = Op.getOperand(0); @@ -2469,8 +2469,8 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { && isConstantSplat(VectorBits, UndefBits, VT.getVectorElementType().getSizeInBits(), SplatBits, SplatUndef, SplatSize)) { - SDOperand tcVec[16]; - SDOperand tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8); + SDValue tcVec[16]; + SDValue tc = DAG.getTargetConstant(SplatBits & 0xff, MVT::i8); const size_t tcVecSize = sizeof(tcVec) / sizeof(tcVec[0]); // Turn the BUILD_VECTOR into a set of target constants: @@ -2482,11 +2482,11 @@ LowerByteImmed(SDOperand Op, SelectionDAG &DAG) { } } - return SDOperand(); + return SDValue(); } //! Lower i32 multiplication -static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, MVT VT, +static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG, MVT VT, unsigned Opc) { switch (VT.getSimpleVT()) { default: @@ -2497,8 +2497,8 @@ static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, MVT VT, /*NOTREACHED*/ case MVT::i32: { - SDOperand rA = Op.getOperand(0); - SDOperand rB = Op.getOperand(1); + SDValue rA = Op.getOperand(0); + SDValue rB = Op.getOperand(1); return DAG.getNode(ISD::ADD, MVT::i32, DAG.getNode(ISD::ADD, MVT::i32, @@ -2508,7 +2508,7 @@ static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, MVT VT, } } - return SDOperand(); + return SDValue(); } //! Custom lowering for CTPOP (count population) @@ -2517,7 +2517,7 @@ static SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG, MVT VT, operand. SPU has such an instruction, but it counts the number of ones per byte, which then have to be accumulated. 
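LowerCTPOP, whose definition follows, is built around the behaviour described in the comment just above: CNTB counts ones per byte, so the per-byte counts still have to be accumulated. The i32 case below does that with a shift-by-16 add, a shift-by-8 add, and a final mask down to the low byte. The same arithmetic in scalar form, as a sketch:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: accumulate per-byte popcounts the way the i32 case does
// (shift-by-16 add, shift-by-8 add, then mask to the low byte).
uint32_t ctpop32_from_byte_counts(uint32_t x) {
  // Per-byte popcount, standing in for CNTB.
  uint32_t cnt = 0;
  for (int b = 0; b < 4; ++b) {
    uint32_t byte = (x >> (8 * b)) & 0xff;
    uint32_t c = 0;
    for (; byte; byte &= byte - 1) ++c;
    cnt |= c << (8 * b);
  }
  uint32_t sum1 = cnt + (cnt >> 16);   // fold the two halfwords together
  uint32_t sum2 = sum1 + (sum1 >> 8);  // fold the two remaining byte counts
  return sum2 & 0xff;                  // the total lives in the low byte
}

int main() {
  assert(ctpop32_from_byte_counts(0xF0F0F0F1u) == 17);
}
```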
*/ -static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT vecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits())); @@ -2525,11 +2525,11 @@ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { default: assert(false && "Invalid value type!"); case MVT::i8: { - SDOperand N = Op.getOperand(0); - SDOperand Elt0 = DAG.getConstant(0, MVT::i32); + SDValue N = Op.getOperand(0); + SDValue Elt0 = DAG.getConstant(0, MVT::i32); - SDOperand Promote = DAG.getNode(SPUISD::PROMOTE_SCALAR, vecVT, N, N); - SDOperand CNTB = DAG.getNode(SPUISD::CNTB, vecVT, Promote); + SDValue Promote = DAG.getNode(SPUISD::PROMOTE_SCALAR, vecVT, N, N); + SDValue CNTB = DAG.getNode(SPUISD::CNTB, vecVT, Promote); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i8, CNTB, Elt0); } @@ -2540,23 +2540,23 @@ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R16CRegClass); - SDOperand N = Op.getOperand(0); - SDOperand Elt0 = DAG.getConstant(0, MVT::i16); - SDOperand Mask0 = DAG.getConstant(0x0f, MVT::i16); - SDOperand Shift1 = DAG.getConstant(8, MVT::i16); + SDValue N = Op.getOperand(0); + SDValue Elt0 = DAG.getConstant(0, MVT::i16); + SDValue Mask0 = DAG.getConstant(0x0f, MVT::i16); + SDValue Shift1 = DAG.getConstant(8, MVT::i16); - SDOperand Promote = DAG.getNode(SPUISD::PROMOTE_SCALAR, vecVT, N, N); - SDOperand CNTB = DAG.getNode(SPUISD::CNTB, vecVT, Promote); + SDValue Promote = DAG.getNode(SPUISD::PROMOTE_SCALAR, vecVT, N, N); + SDValue CNTB = DAG.getNode(SPUISD::CNTB, vecVT, Promote); // CNTB_result becomes the chain to which all of the virtual registers // CNTB_reg, SUM1_reg become associated: - SDOperand CNTB_result = + SDValue CNTB_result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, CNTB, Elt0); - SDOperand CNTB_rescopy = + SDValue CNTB_rescopy = DAG.getCopyToReg(CNTB_result, CNTB_reg, CNTB_result); - SDOperand Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i16); + SDValue Tmp1 = DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i16); return DAG.getNode(ISD::AND, MVT::i16, DAG.getNode(ISD::ADD, MVT::i16, @@ -2573,39 +2573,39 @@ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { unsigned CNTB_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass); unsigned SUM1_reg = RegInfo.createVirtualRegister(&SPU::R32CRegClass); - SDOperand N = Op.getOperand(0); - SDOperand Elt0 = DAG.getConstant(0, MVT::i32); - SDOperand Mask0 = DAG.getConstant(0xff, MVT::i32); - SDOperand Shift1 = DAG.getConstant(16, MVT::i32); - SDOperand Shift2 = DAG.getConstant(8, MVT::i32); + SDValue N = Op.getOperand(0); + SDValue Elt0 = DAG.getConstant(0, MVT::i32); + SDValue Mask0 = DAG.getConstant(0xff, MVT::i32); + SDValue Shift1 = DAG.getConstant(16, MVT::i32); + SDValue Shift2 = DAG.getConstant(8, MVT::i32); - SDOperand Promote = DAG.getNode(SPUISD::PROMOTE_SCALAR, vecVT, N, N); - SDOperand CNTB = DAG.getNode(SPUISD::CNTB, vecVT, Promote); + SDValue Promote = DAG.getNode(SPUISD::PROMOTE_SCALAR, vecVT, N, N); + SDValue CNTB = DAG.getNode(SPUISD::CNTB, vecVT, Promote); // CNTB_result becomes the chain to which all of the virtual registers // CNTB_reg, SUM1_reg become associated: - SDOperand CNTB_result = + SDValue CNTB_result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, CNTB, Elt0); - SDOperand CNTB_rescopy = + SDValue CNTB_rescopy = DAG.getCopyToReg(CNTB_result, CNTB_reg, CNTB_result); - SDOperand Comp1 = + SDValue Comp1 = DAG.getNode(ISD::SRL, MVT::i32, 
DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32), Shift1); - SDOperand Sum1 = + SDValue Sum1 = DAG.getNode(ISD::ADD, MVT::i32, Comp1, DAG.getCopyFromReg(CNTB_rescopy, CNTB_reg, MVT::i32)); - SDOperand Sum1_rescopy = + SDValue Sum1_rescopy = DAG.getCopyToReg(CNTB_result, SUM1_reg, Sum1); - SDOperand Comp2 = + SDValue Comp2 = DAG.getNode(ISD::SRL, MVT::i32, DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32), Shift2); - SDOperand Sum2 = + SDValue Sum2 = DAG.getNode(ISD::ADD, MVT::i32, Comp2, DAG.getCopyFromReg(Sum1_rescopy, SUM1_reg, MVT::i32)); @@ -2616,13 +2616,13 @@ static SDOperand LowerCTPOP(SDOperand Op, SelectionDAG &DAG) { break; } - return SDOperand(); + return SDValue(); } /// LowerOperation - Provide custom lowering hooks for some operations. /// -SDOperand -SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) +SDValue +SPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { unsigned Opc = (unsigned) Op.getOpcode(); MVT VT = Op.getValueType(); @@ -2718,14 +2718,14 @@ SPUTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) return LowerCTPOP(Op, DAG); } - return SDOperand(); + return SDValue(); } //===----------------------------------------------------------------------===// // Target Optimization Hooks //===----------------------------------------------------------------------===// -SDOperand +SDValue SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { #if 0 @@ -2733,23 +2733,23 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const #endif const SPUSubtarget *ST = SPUTM.getSubtargetImpl(); SelectionDAG &DAG = DCI.DAG; - SDOperand Op0 = N->getOperand(0); // everything has at least one operand - SDOperand Result; // Initially, NULL result + SDValue Op0 = N->getOperand(0); // everything has at least one operand + SDValue Result; // Initially, NULL result switch (N->getOpcode()) { default: break; case ISD::ADD: { - SDOperand Op1 = N->getOperand(1); + SDValue Op1 = N->getOperand(1); if (isa<ConstantSDNode>(Op1) && Op0.getOpcode() == SPUISD::IndirectAddr) { - SDOperand Op01 = Op0.getOperand(1); + SDValue Op01 = Op0.getOperand(1); if (Op01.getOpcode() == ISD::Constant || Op01.getOpcode() == ISD::TargetConstant) { // (add <const>, (SPUindirect <arg>, <const>)) -> // (SPUindirect <arg>, <const + const>) ConstantSDNode *CN0 = cast<ConstantSDNode>(Op1); ConstantSDNode *CN1 = cast<ConstantSDNode>(Op01); - SDOperand combinedConst = + SDValue combinedConst = DAG.getConstant(CN0->getValue() + CN1->getValue(), Op0.getValueType()); @@ -2762,14 +2762,14 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const } } else if (isa<ConstantSDNode>(Op0) && Op1.getOpcode() == SPUISD::IndirectAddr) { - SDOperand Op11 = Op1.getOperand(1); + SDValue Op11 = Op1.getOperand(1); if (Op11.getOpcode() == ISD::Constant || Op11.getOpcode() == ISD::TargetConstant) { // (add (SPUindirect <arg>, <const>), <const>) -> // (SPUindirect <arg>, <const + const>) ConstantSDNode *CN0 = cast<ConstantSDNode>(Op0); ConstantSDNode *CN1 = cast<ConstantSDNode>(Op11); - SDOperand combinedConst = + SDValue combinedConst = DAG.getConstant(CN0->getValue() + CN1->getValue(), Op0.getValueType()); @@ -2827,7 +2827,7 @@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const case SPUISD::VEC_SRA: case SPUISD::ROTQUAD_RZ_BYTES: case SPUISD::ROTQUAD_RZ_BITS: { - SDOperand Op1 = N->getOperand(1); + SDValue Op1 = N->getOperand(1); if (isa<ConstantSDNode>(Op1)) { // Kill degenerate vector shifts: @@ -2849,9 +2849,9 
@@ SPUTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const // (SPUpromote_scalar (any|sign|zero_extend (SPUextract_elt0 <arg>))) -> // <arg> // but only if the SPUpromote_scalar and <arg> types match. - SDOperand Op00 = Op0.getOperand(0); + SDValue Op00 = Op0.getOperand(0); if (Op00.getOpcode() == SPUISD::EXTRACT_ELT0) { - SDOperand Op000 = Op00.getOperand(0); + SDValue Op000 = Op00.getOperand(0); if (Op000.getValueType() == N->getValueType(0)) { Result = Op000; } @@ -2932,7 +2932,7 @@ SPUTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, //! Compute used/known bits for a SPU operand void -SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, +SPUTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -2955,7 +2955,7 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, #endif case SPUISD::PROMOTE_SCALAR: { - SDOperand Op0 = Op.getOperand(0); + SDValue Op0 = Op.getOperand(0); MVT Op0VT = Op0.getValueType(); unsigned Op0VTBits = Op0VT.getSizeInBits(); uint64_t InMask = Op0VT.getIntegerVTBitMask(); @@ -3007,9 +3007,9 @@ SPUTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, // LowerAsmOperandForConstraint void -SPUTargetLowering::LowerAsmOperandForConstraint(SDOperand Op, +SPUTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter, - std::vector<SDOperand> &Ops, + std::vector<SDValue> &Ops, SelectionDAG &DAG) const { // Default, for the time being, to the base class handler TargetLowering::LowerAsmOperandForConstraint(Op, ConstraintLetter, Ops, DAG); diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h index 5c41c29c51..814c9ba97a 100644 --- a/lib/Target/CellSPU/SPUISelLowering.h +++ b/lib/Target/CellSPU/SPUISelLowering.h @@ -78,18 +78,18 @@ namespace llvm { /// Predicates that are used for node matching: namespace SPU { - SDOperand get_vec_u18imm(SDNode *N, SelectionDAG &DAG, + SDValue get_vec_u18imm(SDNode *N, SelectionDAG &DAG, MVT ValueType); - SDOperand get_vec_i16imm(SDNode *N, SelectionDAG &DAG, + SDValue get_vec_i16imm(SDNode *N, SelectionDAG &DAG, MVT ValueType); - SDOperand get_vec_i10imm(SDNode *N, SelectionDAG &DAG, + SDValue get_vec_i10imm(SDNode *N, SelectionDAG &DAG, MVT ValueType); - SDOperand get_vec_i8imm(SDNode *N, SelectionDAG &DAG, + SDValue get_vec_i8imm(SDNode *N, SelectionDAG &DAG, MVT ValueType); - SDOperand get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, + SDValue get_ILHUvec_imm(SDNode *N, SelectionDAG &DAG, MVT ValueType); - SDOperand get_v4i32_imm(SDNode *N, SelectionDAG &DAG); - SDOperand get_v2i64_imm(SDNode *N, SelectionDAG &DAG); + SDValue get_v4i32_imm(SDNode *N, SelectionDAG &DAG); + SDValue get_v2i64_imm(SDNode *N, SelectionDAG &DAG); } class SPUTargetMachine; // forward dec'l. @@ -109,15 +109,15 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ValueType for ISD::SETCC - virtual MVT getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDValue &) const; /// LowerOperation - Provide custom lowering hooks for some operations. 
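The ISD::ADD combine shown above is a straightforward constant fold into the addressing node: whenever an outer add of a constant wraps an SPUISD::IndirectAddr whose offset is itself a constant (in either operand order), the two constants are summed and the indirect address is rebuilt with the combined offset. A tiny standalone model of that rewrite; the struct names are invented for the sketch and are not LLVM types:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: model of folding (add C1, (indirect Base, C2))
// into (indirect Base, C1 + C2).
struct Addr { int Base; int64_t Offset; };   // stands in for SPUindirect
struct AddOfAddr { int64_t C; Addr A; };     // stands in for the outer ISD::ADD

Addr foldAddIntoIndirect(const AddOfAddr &N) {
  // (add <const>, (SPUindirect <arg>, <const>)) ->
  // (SPUindirect <arg>, <const + const>)
  return Addr{N.A.Base, N.A.Offset + N.C};
}

int main() {
  Addr Folded = foldAddIntoIndirect({16, {/*Base=*/3, /*Offset=*/8}});
  assert(Folded.Base == 3 && Folded.Offset == 24);
}
```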
/// - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); - virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; - virtual void computeMaskedBitsForTargetNode(const SDOperand Op, + virtual void computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -130,8 +130,8 @@ namespace llvm { getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const; - void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter, - std::vector<SDOperand> &Ops, + void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter, + std::vector<SDValue> &Ops, SelectionDAG &DAG) const; /// isLegalAddressImmediate - Return true if the integer value can be used diff --git a/lib/Target/CellSPU/SPUOperands.td b/lib/Target/CellSPU/SPUOperands.td index d17faac861..d9baf454a7 100644 --- a/lib/Target/CellSPU/SPUOperands.td +++ b/lib/Target/CellSPU/SPUOperands.td @@ -16,7 +16,7 @@ def LO16 : SDNodeXForm<imm, [{ }]>; def LO16_vec : SDNodeXForm<scalar_to_vector, [{ - SDOperand OpVal(0, 0); + SDValue OpVal(0, 0); // Transformation function: get the low 16 bit immediate from a build_vector // node. @@ -43,7 +43,7 @@ def HI16 : SDNodeXForm<imm, [{ // Transformation function: shift the high 16 bit immediate from a build_vector // node into the low 16 bits, and return a 16-bit constant. def HI16_vec : SDNodeXForm<scalar_to_vector, [{ - SDOperand OpVal(0, 0); + SDValue OpVal(0, 0); assert(N->getOpcode() == ISD::BUILD_VECTOR && "HI16_vec got something other than a BUILD_VECTOR"); diff --git a/lib/Target/IA64/IA64ISelDAGToDAG.cpp b/lib/Target/IA64/IA64ISelDAGToDAG.cpp index 8f6840ec94..c5c311466d 100644 --- a/lib/Target/IA64/IA64ISelDAGToDAG.cpp +++ b/lib/Target/IA64/IA64ISelDAGToDAG.cpp @@ -51,19 +51,19 @@ namespace { /// getI64Imm - Return a target constant with the specified value, of type /// i64. - inline SDOperand getI64Imm(uint64_t Imm) { + inline SDValue getI64Imm(uint64_t Imm) { return CurDAG->getTargetConstant(Imm, MVT::i64); } /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC /// base register. Return the virtual register that holds this value. - // SDOperand getGlobalBaseReg(); TODO: hmm + // SDValue getGlobalBaseReg(); TODO: hmm // Select - Convert the specified operand from a target-independent to a // target-specific node if it hasn't already been changed. - SDNode *Select(SDOperand N); + SDNode *Select(SDValue N); - SDNode *SelectIntImmediateExpr(SDOperand LHS, SDOperand RHS, + SDNode *SelectIntImmediateExpr(SDValue LHS, SDValue RHS, unsigned OCHi, unsigned OCLo, bool IsArithmetic = false, bool Negate = false); @@ -71,12 +71,12 @@ namespace { /// SelectCC - Select a comparison of the specified values with the /// specified condition code, returning the CR# of the expression. - SDOperand SelectCC(SDOperand LHS, SDOperand RHS, ISD::CondCode CC); + SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC); /// SelectAddr - Given the specified address, return the two operands for a /// load/store instruction, and return true if it should be an indexed [r+r] /// operation. - bool SelectAddr(SDOperand Addr, SDOperand &Op1, SDOperand &Op2); + bool SelectAddr(SDValue Addr, SDValue &Op1, SDValue &Op2); /// InstructionSelect - This callback is invoked by /// SelectionDAGISel when it has created a SelectionDAG for us to codegen. 
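Looking back at the SPUOperands.td hunk above, the LO16/HI16 pair of SDNodeXForms (and their _vec variants) exist so a 32-bit immediate can be materialised as a load of the upper halfword followed by an OR of the lower halfword, the SPU ILHU-plus-or-immediate idiom; each transform simply hands one half of the value to the matching instruction pattern. A sketch of the split and its reassembly; splitImm32 is an illustrative name:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: split a 32-bit immediate the way LO16/HI16 do, then
// rebuild it the way a load-high-halfword / or-low-halfword pair would.
struct SplitImm { uint16_t Hi; uint16_t Lo; };

SplitImm splitImm32(uint32_t V) {
  return SplitImm{static_cast<uint16_t>(V >> 16),
                  static_cast<uint16_t>(V & 0xffff)};
}

int main() {
  uint32_t V = 0xdeadbeefu;
  SplitImm S = splitImm32(V);
  uint32_t Rebuilt = (uint32_t(S.Hi) << 16) | S.Lo;  // ILHU then or-immediate
  assert(Rebuilt == V);
}
```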
@@ -90,7 +90,7 @@ namespace { #include "IA64GenDAGISel.inc" private: - SDNode *SelectDIV(SDOperand Op); + SDNode *SelectDIV(SDValue Op); }; } @@ -104,11 +104,11 @@ void IA64DAGToDAGISel::InstructionSelect(SelectionDAG &DAG) { DAG.RemoveDeadNodes(); } -SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { +SDNode *IA64DAGToDAGISel::SelectDIV(SDValue Op) { SDNode *N = Op.Val; - SDOperand Chain = N->getOperand(0); - SDOperand Tmp1 = N->getOperand(0); - SDOperand Tmp2 = N->getOperand(1); + SDValue Chain = N->getOperand(0); + SDValue Tmp1 = N->getOperand(0); + SDValue Tmp2 = N->getOperand(1); AddToISelQueue(Chain); AddToISelQueue(Tmp1); @@ -133,40 +133,40 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { // TODO: check for integer divides by powers of 2 (or other simple patterns?) - SDOperand TmpPR, TmpPR2; - SDOperand TmpF1, TmpF2, TmpF3, TmpF4, TmpF5, TmpF6, TmpF7, TmpF8; - SDOperand TmpF9, TmpF10,TmpF11,TmpF12,TmpF13,TmpF14,TmpF15; + SDValue TmpPR, TmpPR2; + SDValue TmpF1, TmpF2, TmpF3, TmpF4, TmpF5, TmpF6, TmpF7, TmpF8; + SDValue TmpF9, TmpF10,TmpF11,TmpF12,TmpF13,TmpF14,TmpF15; SDNode *Result; // we'll need copies of F0 and F1 - SDOperand F0 = CurDAG->getRegister(IA64::F0, MVT::f64); - SDOperand F1 = CurDAG->getRegister(IA64::F1, MVT::f64); + SDValue F0 = CurDAG->getRegister(IA64::F0, MVT::f64); + SDValue F1 = CurDAG->getRegister(IA64::F1, MVT::f64); // OK, emit some code: if(!isFP) { // first, load the inputs into FP regs. TmpF1 = - SDOperand(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, Tmp1), 0); + SDValue(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, Tmp1), 0); Chain = TmpF1.getValue(1); TmpF2 = - SDOperand(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, Tmp2), 0); + SDValue(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, Tmp2), 0); Chain = TmpF2.getValue(1); // next, convert the inputs to FP if(isSigned) { TmpF3 = - SDOperand(CurDAG->getTargetNode(IA64::FCVTXF, MVT::f64, TmpF1), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXF, MVT::f64, TmpF1), 0); Chain = TmpF3.getValue(1); TmpF4 = - SDOperand(CurDAG->getTargetNode(IA64::FCVTXF, MVT::f64, TmpF2), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXF, MVT::f64, TmpF2), 0); Chain = TmpF4.getValue(1); } else { // is unsigned TmpF3 = - SDOperand(CurDAG->getTargetNode(IA64::FCVTXUFS1, MVT::f64, TmpF1), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, MVT::f64, TmpF1), 0); Chain = TmpF3.getValue(1); TmpF4 = - SDOperand(CurDAG->getTargetNode(IA64::FCVTXUFS1, MVT::f64, TmpF2), 0); + SDValue(CurDAG->getTargetNode(IA64::FCVTXUFS1, MVT::f64, TmpF2), 0); Chain = TmpF4.getValue(1); } @@ -179,39 +179,39 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { // we start by computing an approximate reciprocal (good to 9 bits?) 
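The integer divide selection that continues below follows the classic IA-64 frcpa recipe hinted at by the comment above: start from the hardware's approximate reciprocal y of 1/b (good to roughly 9 bits), run fused multiply-add refinement steps of the form e = 1 - b*y, y' = y + y*e (the CFNMAS1/CFMAS1 nodes), each of which roughly doubles the number of correct bits, then form q = a*y and apply one residual correction q + (a - b*q)*y. A floating-point sketch of that iteration in plain doubles, standing in for the predicated FMA sequence:

```cpp
#include <cassert>
#include <cmath>

// Illustrative only: Newton-Raphson reciprocal refinement of the kind the
// frcpa-based divide below performs with fused multiply-adds.
double divide_via_reciprocal(double a, double b) {
  double y = 1.01 / b;              // deliberately rough estimate, frcpa-like
  for (int i = 0; i < 3; ++i) {
    double e = 1.0 - b * y;         // e  = 1 - b*y   (CFNMAS1-style step)
    y = y + y * e;                  // y' = y + y*e   (CFMAS1-style step)
  }
  double q = a * y;                 // initial quotient
  double r = a - b * q;             // residual
  return q + r * y;                 // final corrected quotient
}

int main() {
  assert(std::fabs(divide_via_reciprocal(355.0, 113.0) - 355.0 / 113.0) < 1e-12);
}
```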
// note, this instruction writes _both_ TmpF5 (answer) and TmpPR (predicate) if(isFP) - TmpF5 = SDOperand(CurDAG->getTargetNode(IA64::FRCPAS0, MVT::f64, MVT::i1, + TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS0, MVT::f64, MVT::i1, TmpF3, TmpF4), 0); else - TmpF5 = SDOperand(CurDAG->getTargetNode(IA64::FRCPAS1, MVT::f64, MVT::i1, + TmpF5 = SDValue(CurDAG->getTargetNode(IA64::FRCPAS1, MVT::f64, MVT::i1, TmpF3, TmpF4), 0); TmpPR = TmpF5.getValue(1); Chain = TmpF5.getValue(2); - SDOperand minusB; + SDValue minusB; if(isModulus) { // for remainders, it'll be handy to have // copies of -input_b - minusB = SDOperand(CurDAG->getTargetNode(IA64::SUB, MVT::i64, + minusB = SDValue(CurDAG->getTargetNode(IA64::SUB, MVT::i64, CurDAG->getRegister(IA64::r0, MVT::i64), Tmp2), 0); Chain = minusB.getValue(1); } - SDOperand TmpE0, TmpY1, TmpE1, TmpY2; + SDValue TmpE0, TmpY1, TmpE1, TmpY2; - SDOperand OpsE0[] = { TmpF4, TmpF5, F1, TmpPR }; - TmpE0 = SDOperand(CurDAG->getTargetNode(IA64::CFNMAS1, MVT::f64, + SDValue OpsE0[] = { TmpF4, TmpF5, F1, TmpPR }; + TmpE0 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, MVT::f64, OpsE0, 4), 0); Chain = TmpE0.getValue(1); - SDOperand OpsY1[] = { TmpF5, TmpE0, TmpF5, TmpPR }; - TmpY1 = SDOperand(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + SDValue OpsY1[] = { TmpF5, TmpE0, TmpF5, TmpPR }; + TmpY1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, OpsY1, 4), 0); Chain = TmpY1.getValue(1); - SDOperand OpsE1[] = { TmpE0, TmpE0, F0, TmpPR }; - TmpE1 = SDOperand(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + SDValue OpsE1[] = { TmpE0, TmpE0, F0, TmpPR }; + TmpE1 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, OpsE1, 4), 0); Chain = TmpE1.getValue(1); - SDOperand OpsY2[] = { TmpY1, TmpE1, TmpY1, TmpPR }; - TmpY2 = SDOperand(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + SDValue OpsY2[] = { TmpY1, TmpE1, TmpY1, TmpPR }; + TmpY2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, OpsY2, 4), 0); Chain = TmpY2.getValue(1); @@ -219,53 +219,53 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { if(isModulus) assert(0 && "Sorry, try another FORTRAN compiler."); - SDOperand TmpE2, TmpY3, TmpQ0, TmpR0; + SDValue TmpE2, TmpY3, TmpQ0, TmpR0; - SDOperand OpsE2[] = { TmpE1, TmpE1, F0, TmpPR }; - TmpE2 = SDOperand(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + SDValue OpsE2[] = { TmpE1, TmpE1, F0, TmpPR }; + TmpE2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, OpsE2, 4), 0); Chain = TmpE2.getValue(1); - SDOperand OpsY3[] = { TmpY2, TmpE2, TmpY2, TmpPR }; - TmpY3 = SDOperand(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + SDValue OpsY3[] = { TmpY2, TmpE2, TmpY2, TmpPR }; + TmpY3 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, OpsY3, 4), 0); Chain = TmpY3.getValue(1); - SDOperand OpsQ0[] = { Tmp1, TmpY3, F0, TmpPR }; + SDValue OpsQ0[] = { Tmp1, TmpY3, F0, TmpPR }; TmpQ0 = - SDOperand(CurDAG->getTargetNode(IA64::CFMADS1, MVT::f64, // double prec! + SDValue(CurDAG->getTargetNode(IA64::CFMADS1, MVT::f64, // double prec! OpsQ0, 4), 0); Chain = TmpQ0.getValue(1); - SDOperand OpsR0[] = { Tmp2, TmpQ0, Tmp1, TmpPR }; + SDValue OpsR0[] = { Tmp2, TmpQ0, Tmp1, TmpPR }; TmpR0 = - SDOperand(CurDAG->getTargetNode(IA64::CFNMADS1, MVT::f64, // double prec! + SDValue(CurDAG->getTargetNode(IA64::CFNMADS1, MVT::f64, // double prec! OpsR0, 4), 0); Chain = TmpR0.getValue(1); // we want Result to have the same target register as the frcpa, so // we two-address hack it. See the comment "for this to work..." 
on // page 48 of Intel application note #245415 - SDOperand Ops[] = { TmpF5, TmpY3, TmpR0, TmpQ0, TmpPR }; + SDValue Ops[] = { TmpF5, TmpY3, TmpR0, TmpQ0, TmpPR }; Result = CurDAG->getTargetNode(IA64::TCFMADS0, MVT::f64, // d.p. s0 rndg! Ops, 5); - Chain = SDOperand(Result, 1); + Chain = SDValue(Result, 1); return Result; // XXX: early exit! } else { // this is *not* an FP divide, so there's a bit left to do: - SDOperand TmpQ2, TmpR2, TmpQ3, TmpQ; + SDValue TmpQ2, TmpR2, TmpQ3, TmpQ; - SDOperand OpsQ2[] = { TmpF3, TmpY2, F0, TmpPR }; - TmpQ2 = SDOperand(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, + SDValue OpsQ2[] = { TmpF3, TmpY2, F0, TmpPR }; + TmpQ2 = SDValue(CurDAG->getTargetNode(IA64::CFMAS1, MVT::f64, OpsQ2, 4), 0); Chain = TmpQ2.getValue(1); - SDOperand OpsR2[] = { TmpF4, TmpQ2, TmpF3, TmpPR }; - TmpR2 = SDOperand(CurDAG->getTargetNode(IA64::CFNMAS1, MVT::f64, + SDValue OpsR2[] = { TmpF4, TmpQ2, TmpF3, TmpPR }; + TmpR2 = SDValue(CurDAG->getTargetNode(IA64::CFNMAS1, MVT::f64, OpsR2, 4), 0); Chain = TmpR2.getValue(1); // we want TmpQ3 to have the same target register as the frcpa? maybe we // should two-address hack it. See the comment "for this to work..." on page // 48 of Intel application note #245415 - SDOperand OpsQ3[] = { TmpF5, TmpR2, TmpY2, TmpQ2, TmpPR }; - TmpQ3 = SDOperand(CurDAG->getTargetNode(IA64::TCFMAS1, MVT::f64, + SDValue OpsQ3[] = { TmpF5, TmpR2, TmpY2, TmpQ2, TmpPR }; + TmpQ3 = SDValue(CurDAG->getTargetNode(IA64::TCFMAS1, MVT::f64, OpsQ3, 5), 0); Chain = TmpQ3.getValue(1); @@ -274,27 +274,27 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { // arguments. Other fun bugs may also appear, e.g. 0/x = x, not 0. if(isSigned) - TmpQ = SDOperand(CurDAG->getTargetNode(IA64::FCVTFXTRUNCS1, + TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXTRUNCS1, MVT::f64, TmpQ3), 0); else - TmpQ = SDOperand(CurDAG->getTargetNode(IA64::FCVTFXUTRUNCS1, + TmpQ = SDValue(CurDAG->getTargetNode(IA64::FCVTFXUTRUNCS1, MVT::f64, TmpQ3), 0); Chain = TmpQ.getValue(1); if(isModulus) { - SDOperand FPminusB = - SDOperand(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, minusB), 0); + SDValue FPminusB = + SDValue(CurDAG->getTargetNode(IA64::SETFSIG, MVT::f64, minusB), 0); Chain = FPminusB.getValue(1); - SDOperand Remainder = - SDOperand(CurDAG->getTargetNode(IA64::XMAL, MVT::f64, + SDValue Remainder = + SDValue(CurDAG->getTargetNode(IA64::XMAL, MVT::f64, TmpQ, FPminusB, TmpF1), 0); Chain = Remainder.getValue(1); Result = CurDAG->getTargetNode(IA64::GETFSIG, MVT::i64, Remainder); - Chain = SDOperand(Result, 1); + Chain = SDValue(Result, 1); } else { // just an integer divide Result = CurDAG->getTargetNode(IA64::GETFSIG, MVT::i64, TmpQ); - Chain = SDOperand(Result, 1); + Chain = SDValue(Result, 1); } return Result; @@ -303,7 +303,7 @@ SDNode *IA64DAGToDAGISel::SelectDIV(SDOperand Op) { // Select - Convert the specified operand from a target-independent to a // target-specific node if it hasn't already been changed. -SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { +SDNode *IA64DAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.Val; if (N->isMachineOpcode()) return NULL; // Already selected. @@ -312,8 +312,8 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { default: break; case IA64ISD::BRCALL: { // XXX: this is also a hack! - SDOperand Chain = N->getOperand(0); - SDOperand InFlag; // Null incoming flag value. + SDValue Chain = N->getOperand(0); + SDValue InFlag; // Null incoming flag value. 
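One detail from the modulus path earlier in this hunk is worth making explicit before the node-selection switch below: once the refined quotient has been truncated back to an integer (FCVTFXTRUNCS1 / FCVTFXUTRUNCS1), the remainder comes from a single multiply-add against the pre-computed negated divisor, rem = q*(-b) + a, which is exactly the shape of the XMAL node. In plain integers:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: remainder from a truncated quotient and a negated
// divisor, the shape of the XMAL(q, -b, a) step above.
int64_t remainder_from_quotient(int64_t a, int64_t b) {
  int64_t q      = a / b;   // truncated quotient (FCVTFXTRUNCS1 in the patch)
  int64_t minusB = -b;      // the "copies of -input_b" prepared earlier
  return q * minusB + a;    // rem = a - q*b, written as a multiply-add
}

int main() {
  assert(remainder_from_quotient(23, 5) == 23 % 5);
  assert(remainder_from_quotient(-23, 5) == -23 % 5);
}
```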
AddToISelQueue(Chain); if(N->getNumOperands()==3) { // we have an incoming chain, callee and flag @@ -322,7 +322,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { } unsigned CallOpcode; - SDOperand CallOperand; + SDValue CallOperand; // if we can call directly, do so if (GlobalAddressSDNode *GASD = @@ -338,22 +338,22 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { // otherwise we need to load the function descriptor, // load the branch target (function)'s entry point and GP, // branch (call) then restore the GP - SDOperand FnDescriptor = N->getOperand(1); + SDValue FnDescriptor = N->getOperand(1); AddToISelQueue(FnDescriptor); // load the branch target's entry point [mem] and // GP value [mem+8] - SDOperand targetEntryPoint= - SDOperand(CurDAG->getTargetNode(IA64::LD8, MVT::i64, MVT::Other, + SDValue targetEntryPoint= + SDValue(CurDAG->getTargetNode(IA64::LD8, MVT::i64, MVT::Other, FnDescriptor, CurDAG->getEntryNode()), 0); Chain = targetEntryPoint.getValue(1); - SDOperand targetGPAddr= - SDOperand(CurDAG->getTargetNode(IA64::ADDS, MVT::i64, + SDValue targetGPAddr= + SDValue(CurDAG->getTargetNode(IA64::ADDS, MVT::i64, FnDescriptor, CurDAG->getConstant(8, MVT::i64)), 0); Chain = targetGPAddr.getValue(1); - SDOperand targetGP = - SDOperand(CurDAG->getTargetNode(IA64::LD8, MVT::i64,MVT::Other, + SDValue targetGP = + SDValue(CurDAG->getTargetNode(IA64::LD8, MVT::i64,MVT::Other, targetGPAddr, CurDAG->getEntryNode()), 0); Chain = targetGP.getValue(1); @@ -368,14 +368,14 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { // Finally, once everything is setup, emit the call itself if(InFlag.Val) - Chain = SDOperand(CurDAG->getTargetNode(CallOpcode, MVT::Other, MVT::Flag, + Chain = SDValue(CurDAG->getTargetNode(CallOpcode, MVT::Other, MVT::Flag, CallOperand, InFlag), 0); else // there might be no arguments - Chain = SDOperand(CurDAG->getTargetNode(CallOpcode, MVT::Other, MVT::Flag, + Chain = SDValue(CurDAG->getTargetNode(CallOpcode, MVT::Other, MVT::Flag, CallOperand, Chain), 0); InFlag = Chain.getValue(1); - std::vector<SDOperand> CallResults; + std::vector<SDValue> CallResults; CallResults.push_back(Chain); CallResults.push_back(InFlag); @@ -386,7 +386,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { } case IA64ISD::GETFD: { - SDOperand Input = N->getOperand(0); + SDValue Input = N->getOperand(0); AddToISelQueue(Input); return CurDAG->getTargetNode(IA64::GETFD, MVT::i64, Input); } @@ -399,9 +399,9 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { return SelectDIV(Op); case ISD::TargetConstantFP: { - SDOperand Chain = CurDAG->getEntryNode(); // this is a constant, so.. + SDValue Chain = CurDAG->getEntryNode(); // this is a constant, so.. - SDOperand V; + SDValue V; ConstantFPSDNode* N2 = cast<ConstantFPSDNode>(N); if (N2->getValueAPF().isPosZero()) { V = CurDAG->getCopyFromReg(Chain, IA64::F0, MVT::f64); @@ -411,7 +411,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { } else assert(0 && "Unexpected FP constant!"); - ReplaceUses(SDOperand(N, 0), V); + ReplaceUses(SDValue(N, 0), V); return 0; } @@ -429,7 +429,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { // (ia64 doesn't need one) ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N); Constant *C = CP->getConstVal(); - SDOperand CPI = CurDAG->getTargetConstantPool(C, MVT::i64, + SDValue CPI = CurDAG->getTargetConstantPool(C, MVT::i64, CP->getAlignment()); return CurDAG->getTargetNode(IA64::ADDL_GA, MVT::i64, // ? 
CurDAG->getRegister(IA64::r1, MVT::i64), CPI); @@ -437,9 +437,9 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { case ISD::GlobalAddress: { GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal(); - SDOperand GA = CurDAG->getTargetGlobalAddress(GV, MVT::i64); - SDOperand Tmp = - SDOperand(CurDAG->getTargetNode(IA64::ADDL_GA, MVT::i64, + SDValue GA = CurDAG->getTargetGlobalAddress(GV, MVT::i64); + SDValue Tmp = + SDValue(CurDAG->getTargetNode(IA64::ADDL_GA, MVT::i64, CurDAG->getRegister(IA64::r1, MVT::i64), GA), 0); return CurDAG->getTargetNode(IA64::LD8, MVT::i64, MVT::Other, Tmp, @@ -448,10 +448,10 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { /* XXX case ISD::ExternalSymbol: { - SDOperand EA = CurDAG->getTargetExternalSymbol( + SDValue EA = CurDAG->getTargetExternalSymbol( cast<ExternalSymbolSDNode>(N)->getSymbol(), MVT::i64); - SDOperand Tmp = CurDAG->getTargetNode(IA64::ADDL_EA, MVT::i64, + SDValue Tmp = CurDAG->getTargetNode(IA64::ADDL_EA, MVT::i64, CurDAG->getRegister(IA64::r1, MVT::i64), EA); @@ -461,8 +461,8 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { case ISD::LOAD: { // FIXME: load -1, not 1, for bools? LoadSDNode *LD = cast<LoadSDNode>(N); - SDOperand Chain = LD->getChain(); - SDOperand Address = LD->getBasePtr(); + SDValue Chain = LD->getChain(); + SDValue Address = LD->getBasePtr(); AddToISelQueue(Chain); AddToISelQueue(Address); @@ -478,7 +478,7 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { Opc = IA64::LD1; // first we load a byte, then compare for != 0 if(N->getValueType(0) == MVT::i1) { // XXX: early exit! return CurDAG->SelectNodeTo(N, IA64::CMPNE, MVT::i1, MVT::Other, - SDOperand(CurDAG->getTargetNode(Opc, MVT::i64, Address), 0), + SDValue(CurDAG->getTargetNode(Opc, MVT::i64, Address), 0), CurDAG->getRegister(IA64::r0, MVT::i64), Chain); } @@ -501,8 +501,8 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { case ISD::STORE: { StoreSDNode *ST = cast<StoreSDNode>(N); - SDOperand Address = ST->getBasePtr(); - SDOperand Chain = ST->getChain(); + SDValue Address = ST->getBasePtr(); + SDValue Chain = ST->getChain(); AddToISelQueue(Address); AddToISelQueue(Chain); @@ -513,13 +513,13 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { case MVT::i1: { // this is a bool Opc = IA64::ST1; // we store either 0 or 1 as a byte // first load zero! 
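The BRCALL case earlier in this hunk shows how an indirect IA-64 call goes through a function descriptor rather than a raw code address: the descriptor holds the callee's entry point at offset 0 and its GP value at offset 8, so the selector emits an LD8 for each (with an ADDS of 8 in between), branches through the loaded entry point, and restores the caller's GP afterwards. A minimal model of that double indirection; the struct and function names are invented for the sketch:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: an IA-64 style function descriptor is a pair
// { entry point, GP }, and an indirect call loads both before branching.
struct FnDescriptor {
  uint64_t EntryPoint;   // loaded with LD8 from [fd + 0]
  uint64_t GP;           // loaded with LD8 from [fd + 8]
};

struct CallTarget { uint64_t Target; uint64_t NewGP; };

CallTarget resolveIndirectCall(const FnDescriptor *FD) {
  return CallTarget{FD->EntryPoint, FD->GP};  // branch to Target with GP=NewGP
}

int main() {
  FnDescriptor FD{0x4000000000001230ULL, 0x6000000000008000ULL};
  CallTarget T = resolveIndirectCall(&FD);
  assert(T.Target == FD.EntryPoint && T.NewGP == FD.GP);
}
```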
- SDOperand Initial = CurDAG->getCopyFromReg(Chain, IA64::r0, MVT::i64); + SDValue Initial = CurDAG->getCopyFromReg(Chain, IA64::r0, MVT::i64); Chain = Initial.getValue(1); // then load 1 into the same reg iff the predicate to store is 1 - SDOperand Tmp = ST->getValue(); + SDValue Tmp = ST->getValue(); AddToISelQueue(Tmp); Tmp = - SDOperand(CurDAG->getTargetNode(IA64::TPCADDS, MVT::i64, Initial, + SDValue(CurDAG->getTargetNode(IA64::TPCADDS, MVT::i64, Initial, CurDAG->getTargetConstant(1, MVT::i64), Tmp), 0); return CurDAG->SelectNodeTo(N, Opc, MVT::Other, Address, Tmp, Chain); @@ -537,16 +537,16 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { } } - SDOperand N1 = N->getOperand(1); - SDOperand N2 = N->getOperand(2); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); AddToISelQueue(N1); AddToISelQueue(N2); return CurDAG->SelectNodeTo(N, Opc, MVT::Other, N2, N1, Chain); } case ISD::BRCOND: { - SDOperand Chain = N->getOperand(0); - SDOperand CC = N->getOperand(1); + SDValue Chain = N->getOperand(0); + SDValue CC = N->getOperand(1); AddToISelQueue(Chain); AddToISelQueue(CC); MachineBasicBlock *Dest = @@ -561,14 +561,14 @@ SDNode *IA64DAGToDAGISel::Select(SDOperand Op) { int64_t Amt = cast<ConstantSDNode>(N->getOperand(1))->getValue(); unsigned Opc = N->getOpcode() == ISD::CALLSEQ_START ? IA64::ADJUSTCALLSTACKDOWN : IA64::ADJUSTCALLSTACKUP; - SDOperand N0 = N->getOperand(0); + SDValue N0 = N->getOperand(0); AddToISelQueue(N0); return CurDAG->SelectNodeTo(N, Opc, MVT::Other, getI64Imm(Amt), N0); } case ISD::BR: // FIXME: we don't need long branches all the time! - SDOperand N0 = N->getOperand(0); + SDValue N0 = N->getOperand(0); AddToISelQueue(N0); return CurDAG->SelectNodeTo(N, IA64::BRL_NOTCALL, MVT::Other, N->getOperand(1), N0); diff --git a/lib/Target/IA64/IA64ISelLowering.cpp b/lib/Target/IA64/IA64ISelLowering.cpp index fa04672698..87c81bf427 100644 --- a/lib/Target/IA64/IA64ISelLowering.cpp +++ b/lib/Target/IA64/IA64ISelLowering.cpp @@ -140,12 +140,12 @@ const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const { } } -MVT IA64TargetLowering::getSetCCResultType(const SDOperand &) const { +MVT IA64TargetLowering::getSetCCResultType(const SDValue &) const { return MVT::i1; } void IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, - SmallVectorImpl<SDOperand> &ArgValues) { + SmallVectorImpl<SDValue> &ArgValues) { // // add beautiful description of IA64 stack frame format // here (from intel 24535803.pdf most likely) @@ -177,7 +177,7 @@ void IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { - SDOperand newroot, argt; + SDValue newroot, argt; if(count < 8) { // need to fix this logic? maybe. 
switch (getValueType(I->getType()).getSimpleVT()) { @@ -229,7 +229,7 @@ void IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, // Create the SelectionDAG nodes corresponding to a load //from this parameter - SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64); + SDValue FIN = DAG.getFrameIndex(FI, MVT::i64); argt = newroot = DAG.getLoad(getValueType(I->getType()), DAG.getEntryNode(), FIN, NULL, 0); } @@ -302,11 +302,11 @@ void IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, } } -std::pair<SDOperand, SDOperand> -IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, +std::pair<SDValue, SDValue> +IA64TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, bool isVarArg, unsigned CallingConv, - bool isTailCall, SDOperand Callee, + bool isTailCall, SDValue Callee, ArgListTy &Args, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); @@ -335,17 +335,17 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy())); - SDOperand StackPtr; - std::vector<SDOperand> Stores; - std::vector<SDOperand> Converts; - std::vector<SDOperand> RegValuesToPass; + SDValue StackPtr; + std::vector<SDValue> Stores; + std::vector<SDValue> Converts; + std::vector<SDValue> RegValuesToPass; unsigned ArgOffset = 16; for (unsigned i = 0, e = Args.size(); i != e; ++i) { - SDOperand Val = Args[i].Node; + SDValue Val = Args[i].Node; MVT ObjectVT = Val.getValueType(); - SDOperand ValToStore(0, 0), ValToConvert(0, 0); + SDValue ValToStore(0, 0), ValToConvert(0, 0); unsigned ObjSize=8; switch (ObjectVT.getSimpleVT()) { default: assert(0 && "unexpected argument type!"); @@ -391,7 +391,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, if(!StackPtr.Val) { StackPtr = DAG.getRegister(IA64::r12, MVT::i64); } - SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); + SDValue PtrOff = DAG.getConstant(ArgOffset, getPointerTy()); PtrOff = DAG.getNode(ISD::ADD, MVT::i64, StackPtr, PtrOff); Stores.push_back(DAG.getStore(Chain, ValToStore, PtrOff, NULL, 0)); ArgOffset += ObjSize; @@ -416,16 +416,16 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, IA64::F12, IA64::F13, IA64::F14, IA64::F15 }; - SDOperand InFlag; + SDValue InFlag; // save the current GP, SP and RP : FIXME: do we need to do all 3 always? - SDOperand GPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r1, MVT::i64, InFlag); + SDValue GPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r1, MVT::i64, InFlag); Chain = GPBeforeCall.getValue(1); InFlag = Chain.getValue(2); - SDOperand SPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r12, MVT::i64, InFlag); + SDValue SPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r12, MVT::i64, InFlag); Chain = SPBeforeCall.getValue(1); InFlag = Chain.getValue(2); - SDOperand RPBeforeCall = DAG.getCopyFromReg(Chain, IA64::rp, MVT::i64, InFlag); + SDValue RPBeforeCall = DAG.getCopyFromReg(Chain, IA64::rp, MVT::i64, InFlag); Chain = RPBeforeCall.getValue(1); InFlag = Chain.getValue(2); @@ -464,7 +464,7 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, */ std::vector<MVT> NodeTys; - std::vector<SDOperand> CallOperands; + std::vector<SDValue> CallOperands; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. 
CallOperands.push_back(Chain); @@ -494,17 +494,17 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, RetVals.push_back(MVT::Flag); MVT RetTyVT = getValueType(RetTy); - SDOperand RetVal; + SDValue RetVal; if (RetTyVT != MVT::isVoid) { switch (RetTyVT.getSimpleVT()) { default: assert(0 && "Unknown value type to return!"); case MVT::i1: { // bools are just like other integers (returned in r8) // we *could* fall through to the truncate below, but this saves a // few redundant predicate ops - SDOperand boolInR8 = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64,InFlag); + SDValue boolInR8 = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64,InFlag); InFlag = boolInR8.getValue(2); Chain = boolInR8.getValue(1); - SDOperand zeroReg = DAG.getCopyFromReg(Chain, IA64::r0, MVT::i64, InFlag); + SDValue zeroReg = DAG.getCopyFromReg(Chain, IA64::r0, MVT::i64, InFlag); InFlag = zeroReg.getValue(2); Chain = zeroReg.getValue(1); @@ -546,18 +546,18 @@ IA64TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy, Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, getPointerTy()), DAG.getConstant(0, getPointerTy()), - SDOperand()); + SDValue()); return std::make_pair(RetVal, Chain); } -SDOperand IA64TargetLowering:: -LowerOperation(SDOperand Op, SelectionDAG &DAG) { +SDValue IA64TargetLowering:: +LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Should not custom lower this!"); case ISD::GlobalTLSAddress: assert(0 && "TLS not implemented for IA64."); case ISD::RET: { - SDOperand AR_PFSVal, Copy; + SDValue AR_PFSVal, Copy; switch(Op.getNumOperands()) { default: @@ -575,22 +575,22 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) { AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64); Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1), - SDOperand()); + SDValue()); AR_PFSVal = DAG.getCopyToReg(Copy.getValue(0), IA64::AR_PFS, AR_PFSVal, Copy.getValue(1)); return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal, AR_PFSVal.getValue(1)); } } - return SDOperand(); + return SDValue(); } case ISD::VAARG: { MVT VT = getPointerTy(); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); - SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1), + SDValue VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1), SV, 0); // Increment the pointer, VAList, to the next vaarg - SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList, + SDValue VAIncr = DAG.getNode(ISD::ADD, VT, VAList, DAG.getConstant(VT.getSizeInBits()/8, VT)); // Store the incremented VAList to the legalized pointer @@ -602,7 +602,7 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::VASTART: { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. 
- SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64); + SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); } @@ -610,5 +610,5 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::RETURNADDR: break; case ISD::FRAMEADDR: break; } - return SDOperand(); + return SDValue(); } diff --git a/lib/Target/IA64/IA64ISelLowering.h b/lib/Target/IA64/IA64ISelLowering.h index 54f6c2ed15..b7508f79c3 100644 --- a/lib/Target/IA64/IA64ISelLowering.h +++ b/lib/Target/IA64/IA64ISelLowering.h @@ -49,24 +49,24 @@ namespace llvm { const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType: return ISD::SETCC's result type. - virtual MVT getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDValue &) const; /// LowerArguments - This hook must be implemented to indicate how we should /// lower the arguments for the specified function, into the specified DAG. virtual void LowerArguments(Function &F, SelectionDAG &DAG, - SmallVectorImpl<SDOperand> &ArgValues); + SmallVectorImpl<SDValue> &ArgValues); /// LowerCallTo - This hook lowers an abstract call to a function into an /// actual call. - virtual std::pair<SDOperand, SDOperand> - LowerCallTo(SDOperand Chain, const Type *RetTy, + virtual std::pair<SDValue, SDValue> + LowerCallTo(SDValue Chain, const Type *RetTy, bool RetSExt, bool RetZExt, bool isVarArg, unsigned CC, bool isTailCall, - SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG); + SDValue Callee, ArgListTy &Args, SelectionDAG &DAG); /// LowerOperation - for custom lowering specific ops /// (currently, only "ret void") - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); }; } diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index 388f5be701..f463d3ad7d 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -78,17 +78,17 @@ private: // Include the pieces autogenerated from the target description. #include "MipsGenDAGISel.inc" - SDOperand getGlobalBaseReg(); - SDNode *Select(SDOperand N); + SDValue getGlobalBaseReg(); + SDNode *Select(SDValue N); // Complex Pattern. - bool SelectAddr(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &Offset); + bool SelectAddr(SDValue Op, SDValue N, + SDValue &Base, SDValue &Offset); // getI32Imm - Return a target constant with the specified // value, of type i32. - inline SDOperand getI32Imm(unsigned Imm) { + inline SDValue getI32Imm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, MVT::i32); } @@ -124,7 +124,7 @@ InstructionSelect(SelectionDAG &SD) /// getGlobalBaseReg - Output the instructions required to put the /// GOT address into a register. -SDOperand MipsDAGToDAGISel::getGlobalBaseReg() { +SDValue MipsDAGToDAGISel::getGlobalBaseReg() { MachineFunction* MF = BB->getParent(); unsigned GP = 0; for(MachineRegisterInfo::livein_iterator ii = MF->getRegInfo().livein_begin(), @@ -141,7 +141,7 @@ SDOperand MipsDAGToDAGISel::getGlobalBaseReg() { /// ComplexPattern used on MipsInstrInfo /// Used on Mips Load/Store instructions bool MipsDAGToDAGISel:: -SelectAddr(SDOperand Op, SDOperand Addr, SDOperand &Offset, SDOperand &Base) +SelectAddr(SDValue Op, SDValue Addr, SDValue &Offset, SDValue &Base) { // if Address is FI, get the TargetFrameIndex. 
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { @@ -191,7 +191,7 @@ SelectAddr(SDOperand Op, SDOperand Addr, SDOperand &Offset, SDOperand &Base) /// Select instructions not customized! Used for /// expanded, promoted and normal instructions SDNode* MipsDAGToDAGISel:: -Select(SDOperand N) +Select(SDValue N) { SDNode *Node = N.Val; unsigned Opcode = Node->getOpcode(); @@ -225,7 +225,7 @@ Select(SDOperand N) case ISD::SUBE: case ISD::ADDE: { - SDOperand InFlag = Node->getOperand(2), CmpLHS; + SDValue InFlag = Node->getOperand(2), CmpLHS; unsigned Opc = InFlag.getOpcode(), MOp; assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) || @@ -240,20 +240,20 @@ Select(SDOperand N) MOp = Mips::SUBu; } - SDOperand Ops[] = { CmpLHS, InFlag.getOperand(1) }; + SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) }; - SDOperand LHS = Node->getOperand(0); - SDOperand RHS = Node->getOperand(1); + SDValue LHS = Node->getOperand(0); + SDValue RHS = Node->getOperand(1); AddToISelQueue(LHS); AddToISelQueue(RHS); MVT VT = LHS.getValueType(); SDNode *Carry = CurDAG->getTargetNode(Mips::SLTu, VT, Ops, 2); SDNode *AddCarry = CurDAG->getTargetNode(Mips::ADDu, VT, - SDOperand(Carry,0), RHS); + SDValue(Carry,0), RHS); return CurDAG->SelectNodeTo(N.Val, MOp, VT, MVT::Flag, - LHS, SDOperand(AddCarry,0)); + LHS, SDValue(AddCarry,0)); } /// Mul/Div with two results @@ -261,8 +261,8 @@ Select(SDOperand N) case ISD::UDIVREM: case ISD::SMUL_LOHI: case ISD::UMUL_LOHI: { - SDOperand Op1 = Node->getOperand(0); - SDOperand Op2 = Node->getOperand(1); + SDValue Op1 = Node->getOperand(0); + SDValue Op2 = Node->getOperand(1); AddToISelQueue(Op1); AddToISelQueue(Op2); @@ -274,17 +274,17 @@ Select(SDOperand N) SDNode *Node = CurDAG->getTargetNode(Op, MVT::Flag, Op1, Op2); - SDOperand InFlag = SDOperand(Node, 0); + SDValue InFlag = SDValue(Node, 0); SDNode *Lo = CurDAG->getTargetNode(Mips::MFLO, MVT::i32, MVT::Flag, InFlag); - InFlag = SDOperand(Lo,1); + InFlag = SDValue(Lo,1); SDNode *Hi = CurDAG->getTargetNode(Mips::MFHI, MVT::i32, InFlag); if (!N.getValue(0).use_empty()) - ReplaceUses(N.getValue(0), SDOperand(Lo,0)); + ReplaceUses(N.getValue(0), SDValue(Lo,0)); if (!N.getValue(1).use_empty()) - ReplaceUses(N.getValue(1), SDOperand(Hi,0)); + ReplaceUses(N.getValue(1), SDValue(Hi,0)); return NULL; } @@ -293,15 +293,15 @@ Select(SDOperand N) case ISD::MUL: case ISD::MULHS: case ISD::MULHU: { - SDOperand MulOp1 = Node->getOperand(0); - SDOperand MulOp2 = Node->getOperand(1); + SDValue MulOp1 = Node->getOperand(0); + SDValue MulOp2 = Node->getOperand(1); AddToISelQueue(MulOp1); AddToISelQueue(MulOp2); unsigned MulOp = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT); SDNode *MulNode = CurDAG->getTargetNode(MulOp, MVT::Flag, MulOp1, MulOp2); - SDOperand InFlag = SDOperand(MulNode, 0); + SDValue InFlag = SDValue(MulNode, 0); if (MulOp == ISD::MUL) return CurDAG->getTargetNode(Mips::MFLO, MVT::i32, InFlag); @@ -314,8 +314,8 @@ Select(SDOperand N) case ISD::UREM: case ISD::SDIV: case ISD::UDIV: { - SDOperand Op1 = Node->getOperand(0); - SDOperand Op2 = Node->getOperand(1); + SDValue Op1 = Node->getOperand(0); + SDValue Op2 = Node->getOperand(1); AddToISelQueue(Op1); AddToISelQueue(Op2); @@ -329,13 +329,13 @@ Select(SDOperand N) } SDNode *Node = CurDAG->getTargetNode(Op, MVT::Flag, Op1, Op2); - SDOperand InFlag = SDOperand(Node, 0); + SDValue InFlag = SDValue(Node, 0); return CurDAG->getTargetNode(MOp, MVT::i32, InFlag); } // Get target GOT address. 
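The ISD::SMUL_LOHI / ISD::UMUL_LOHI hunk just above (MipsISelDAGToDAG.cpp) shows the pattern this rename exercises most often: a value is an explicit (node, result-number) pair, so target nodes are wrapped in SDValue(Node, ResNo), the glue result is threaded by index, and each result of a multi-output node is published back to the DAG with ReplaceUses. The following is a minimal sketch of that shape, restating the post-rename code from the hunk under the same 2008-era selector context (CurDAG, AddToISelQueue, ReplaceUses); it adds no new functionality.

  // Sketch only: multiply-with-two-results selection after the rename.
  SDValue Op1 = Node->getOperand(0);
  SDValue Op2 = Node->getOperand(1);
  AddToISelQueue(Op1);
  AddToISelQueue(Op2);

  // MULT leaves its results in HI/LO; its Flag result is used as glue
  // so the MFLO/MFHI reads are ordered after the multiply.
  SDNode *Mul = CurDAG->getTargetNode(Mips::MULT, MVT::Flag, Op1, Op2);
  SDNode *Lo  = CurDAG->getTargetNode(Mips::MFLO, MVT::i32, MVT::Flag,
                                      SDValue(Mul, 0));
  SDNode *Hi  = CurDAG->getTargetNode(Mips::MFHI, MVT::i32, SDValue(Lo, 1));

  // Each result of the original node is replaced by an explicit
  // (node, result index) handle rather than a bare node pointer.
  if (!N.getValue(0).use_empty())
    ReplaceUses(N.getValue(0), SDValue(Lo, 0));
  if (!N.getValue(1).use_empty())
    ReplaceUses(N.getValue(1), SDValue(Hi, 0));
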
case ISD::GLOBAL_OFFSET_TABLE: { - SDOperand Result = getGlobalBaseReg(); + SDValue Result = getGlobalBaseReg(); ReplaceUses(N, Result); return NULL; } @@ -347,21 +347,21 @@ Select(SDOperand N) case MipsISD::JmpLink: { if (TM.getRelocationModel() == Reloc::PIC_) { //bool isCodeLarge = (TM.getCodeModel() == CodeModel::Large); - SDOperand Chain = Node->getOperand(0); - SDOperand Callee = Node->getOperand(1); + SDValue Chain = Node->getOperand(0); + SDValue Callee = Node->getOperand(1); AddToISelQueue(Chain); - SDOperand T9Reg = CurDAG->getRegister(Mips::T9, MVT::i32); - SDOperand InFlag(0, 0); + SDValue T9Reg = CurDAG->getRegister(Mips::T9, MVT::i32); + SDValue InFlag(0, 0); if ( (isa<GlobalAddressSDNode>(Callee)) || (isa<ExternalSymbolSDNode>(Callee)) ) { /// Direct call for global addresses and external symbols - SDOperand GPReg = CurDAG->getRegister(Mips::GP, MVT::i32); + SDValue GPReg = CurDAG->getRegister(Mips::GP, MVT::i32); // Use load to get GOT target - SDOperand Ops[] = { Callee, GPReg, Chain }; - SDOperand Load = SDOperand(CurDAG->getTargetNode(Mips::LW, MVT::i32, + SDValue Ops[] = { Callee, GPReg, Chain }; + SDValue Load = SDValue(CurDAG->getTargetNode(Mips::LW, MVT::i32, MVT::Other, Ops, 3), 0); Chain = Load.getValue(1); AddToISelQueue(Chain); @@ -377,10 +377,10 @@ Select(SDOperand N) // Emit Jump and Link Register SDNode *ResNode = CurDAG->getTargetNode(Mips::JALR, MVT::Other, MVT::Flag, T9Reg, Chain); - Chain = SDOperand(ResNode, 0); - InFlag = SDOperand(ResNode, 1); - ReplaceUses(SDOperand(Node, 0), Chain); - ReplaceUses(SDOperand(Node, 1), InFlag); + Chain = SDValue(ResNode, 0); + InFlag = SDValue(ResNode, 1); + ReplaceUses(SDValue(Node, 0), Chain); + ReplaceUses(SDValue(Node, 1), InFlag); return ResNode; } } diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index d6e5ecc5c6..0ba62b3728 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -132,13 +132,13 @@ MipsTargetLowering(MipsTargetMachine &TM): TargetLowering(TM) } -MVT MipsTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT MipsTargetLowering::getSetCCResultType(const SDValue &) const { return MVT::i32; } -SDOperand MipsTargetLowering:: -LowerOperation(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { @@ -151,7 +151,7 @@ LowerOperation(SDOperand Op, SelectionDAG &DAG) case ISD::ConstantPool: return LowerConstantPool(Op, DAG); case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); } - return SDOperand(); + return SDValue(); } MachineBasicBlock * @@ -268,105 +268,105 @@ bool MipsTargetLowering::IsGlobalInSmallSection(GlobalValue *GV) //===----------------------------------------------------------------------===// // Misc Lower Operation implementation //===----------------------------------------------------------------------===// -SDOperand MipsTargetLowering:: -LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) { GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); - SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32); + SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); if (!Subtarget->hasABICall()) { if (isa<Function>(GV)) return GA; const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); - SDOperand Ops[] = { GA }; + SDValue Ops[] = { GA }; if (IsGlobalInSmallSection(GV)) { // %gp_rel relocation - SDOperand GPRelNode = 
DAG.getNode(MipsISD::GPRel, VTs, 1, Ops, 1); - SDOperand GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i32); + SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, VTs, 1, Ops, 1); + SDValue GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i32); return DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode); } // %hi/%lo relocation - SDOperand HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1); - SDOperand Lo = DAG.getNode(MipsISD::Lo, MVT::i32, GA); + SDValue HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1); + SDValue Lo = DAG.getNode(MipsISD::Lo, MVT::i32, GA); return DAG.getNode(ISD::ADD, MVT::i32, HiPart, Lo); } else { // Abicall relocations, TODO: make this cleaner. - SDOperand ResNode = DAG.getLoad(MVT::i32, DAG.getEntryNode(), GA, NULL, 0); + SDValue ResNode = DAG.getLoad(MVT::i32, DAG.getEntryNode(), GA, NULL, 0); // On functions and global targets not internal linked only // a load from got/GP is necessary for PIC to work. if (!GV->hasInternalLinkage() || isa<Function>(GV)) return ResNode; - SDOperand Lo = DAG.getNode(MipsISD::Lo, MVT::i32, GA); + SDValue Lo = DAG.getNode(MipsISD::Lo, MVT::i32, GA); return DAG.getNode(ISD::ADD, MVT::i32, ResNode, Lo); } assert(0 && "Dont know how to handle GlobalAddress"); - return SDOperand(0,0); + return SDValue(0,0); } -SDOperand MipsTargetLowering:: -LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) { assert(0 && "TLS not implemented for MIPS."); - return SDOperand(); // Not reached + return SDValue(); // Not reached } -SDOperand MipsTargetLowering:: -LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) { - SDOperand LHS = Op.getOperand(0); - SDOperand RHS = Op.getOperand(1); - SDOperand True = Op.getOperand(2); - SDOperand False = Op.getOperand(3); - SDOperand CC = Op.getOperand(4); + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); + SDValue True = Op.getOperand(2); + SDValue False = Op.getOperand(3); + SDValue CC = Op.getOperand(4); const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); - SDOperand Ops[] = { LHS, RHS, CC }; - SDOperand SetCCRes = DAG.getNode(ISD::SETCC, VTs, 1, Ops, 3); + SDValue Ops[] = { LHS, RHS, CC }; + SDValue SetCCRes = DAG.getNode(ISD::SETCC, VTs, 1, Ops, 3); return DAG.getNode(MipsISD::SelectCC, True.getValueType(), SetCCRes, True, False); } -SDOperand MipsTargetLowering:: -LowerJumpTable(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerJumpTable(SDValue Op, SelectionDAG &DAG) { - SDOperand ResNode; - SDOperand HiPart; + SDValue ResNode; + SDValue HiPart; MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); + SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { const MVT *VTs = DAG.getNodeValueTypes(MVT::i32); - SDOperand Ops[] = { JTI }; + SDValue Ops[] = { JTI }; HiPart = DAG.getNode(MipsISD::Hi, VTs, 1, Ops, 1); } else // Emit Load from Global Pointer HiPart = DAG.getLoad(MVT::i32, DAG.getEntryNode(), JTI, NULL, 0); - SDOperand Lo = DAG.getNode(MipsISD::Lo, MVT::i32, JTI); + SDValue Lo = DAG.getNode(MipsISD::Lo, MVT::i32, JTI); ResNode = DAG.getNode(ISD::ADD, MVT::i32, HiPart, Lo); return ResNode; } -SDOperand MipsTargetLowering:: -LowerConstantPool(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerConstantPool(SDValue Op, SelectionDAG &DAG) { - SDOperand 
ResNode; + SDValue ResNode; ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); Constant *C = N->getConstVal(); - SDOperand CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment()); + SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment()); // gp_rel relocation if (!Subtarget->hasABICall() && IsInSmallSection(getTargetData()->getABITypeSize(C->getType()))) { - SDOperand GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP); - SDOperand GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i32); + SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP); + SDValue GOT = DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, MVT::i32); ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode); } else { // %hi/%lo relocation - SDOperand HiPart = DAG.getNode(MipsISD::Hi, MVT::i32, CP); - SDOperand Lo = DAG.getNode(MipsISD::Lo, MVT::i32, CP); + SDValue HiPart = DAG.getNode(MipsISD::Hi, MVT::i32, CP); + SDValue Lo = DAG.getNode(MipsISD::Lo, MVT::i32, CP); ResNode = DAG.getNode(ISD::ADD, MVT::i32, HiPart, Lo); } @@ -391,8 +391,8 @@ LowerConstantPool(SDOperand Op, SelectionDAG &DAG) //===----------------------------------------------------------------------===// /// Mips custom CALL implementation -SDOperand MipsTargetLowering:: -LowerCALL(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerCALL(SDValue Op, SelectionDAG &DAG) { unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); @@ -410,13 +410,13 @@ LowerCALL(SDOperand Op, SelectionDAG &DAG) /// regs to (physical regs)/(stack frame), CALLSEQ_START and /// CALLSEQ_END are emitted. /// TODO: isVarArg, isTailCall. -SDOperand MipsTargetLowering:: -LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) +SDValue MipsTargetLowering:: +LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC) { MachineFunction &MF = DAG.getMachineFunction(); - SDOperand Chain = Op.getOperand(0); - SDOperand Callee = Op.getOperand(4); + SDValue Chain = Op.getOperand(0); + SDValue Callee = Op.getOperand(4); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; MachineFrameInfo *MFI = MF.getFrameInfo(); @@ -440,8 +440,8 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) getPointerTy())); // With EABI is it possible to have 16 args on registers. - SmallVector<std::pair<unsigned, SDOperand>, 16> RegsToPass; - SmallVector<SDOperand, 8> MemOpChains; + SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass; + SmallVector<SDValue, 8> MemOpChains; // First/LastArgStackLoc contains the first/last // "at stack" argument location. @@ -453,7 +453,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) CCValAssign &VA = ArgLocs[i]; // Arguments start after the 5 first operands of ISD::CALL - SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); + SDValue Arg = Op.getOperand(5+2*VA.getValNo()); // Promote the value if needed. switch (VA.getLocInfo()) { @@ -488,7 +488,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8, LastArgStackLoc); - SDOperand PtrOff = DAG.getFrameIndex(FI,getPointerTy()); + SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy()); // emit ISD::STORE whichs stores the // parameter value to a stack Location @@ -505,7 +505,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) // chain and flag operands which copy the outgoing args into registers. // The InFlag in necessary since all emited instructions must be // stuck together. 
- SDOperand InFlag; + SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, InFlag); @@ -526,7 +526,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) // // Returns a chain & a flag for retval copy to use. SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.push_back(Chain); Ops.push_back(Callee); @@ -570,17 +570,17 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) // Reload GP value. FI = MipsFI->getGPFI(); - SDOperand FIN = DAG.getFrameIndex(FI,getPointerTy()); - SDOperand GPLoad = DAG.getLoad(MVT::i32, Chain, FIN, NULL, 0); + SDValue FIN = DAG.getFrameIndex(FI,getPointerTy()); + SDValue GPLoad = DAG.getLoad(MVT::i32, Chain, FIN, NULL, 0); Chain = GPLoad.getValue(1); Chain = DAG.getCopyToReg(Chain, DAG.getRegister(Mips::GP, MVT::i32), - GPLoad, SDOperand(0,0)); + GPLoad, SDValue(0,0)); InFlag = Chain.getValue(1); } // Handle result values, copying them out of physregs into vregs that we // return. - return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); + return SDValue(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); } /// LowerCallResult - Lower the result values of an ISD::CALL into the @@ -589,7 +589,7 @@ LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC) /// being lowered. Returns a SDNode with the same number of values as the /// ISD::CALL. SDNode *MipsTargetLowering:: -LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, +LowerCallResult(SDValue Chain, SDValue InFlag, SDNode *TheCall, unsigned CallingConv, SelectionDAG &DAG) { bool isVarArg = cast<ConstantSDNode>(TheCall->getOperand(2))->getValue() != 0; @@ -599,7 +599,7 @@ LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs); CCInfo.AnalyzeCallResult(TheCall, RetCC_Mips); - SmallVector<SDOperand, 8> ResultVals; + SmallVector<SDValue, 8> ResultVals; // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { @@ -621,8 +621,8 @@ LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, //===----------------------------------------------------------------------===// /// Mips custom FORMAL_ARGUMENTS implementation -SDOperand MipsTargetLowering:: -LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); switch(CC) @@ -638,10 +638,10 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) /// virtual registers and generate load operations for /// arguments places on the stack. 
/// TODO: isVarArg -SDOperand MipsTargetLowering:: -LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerCCCArguments(SDValue Op, SelectionDAG &DAG) { - SDOperand Root = Op.getOperand(0); + SDValue Root = Op.getOperand(0); MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); @@ -659,8 +659,8 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); CCInfo.AnalyzeFormalArguments(Op.Val, CC_Mips); - SmallVector<SDOperand, 16> ArgValues; - SDOperand StackPtr; + SmallVector<SDValue, 16> ArgValues; + SDValue StackPtr; unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16); @@ -689,7 +689,7 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) // Transform the arguments stored on // physical registers into virtual ones unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC); - SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); + SDValue ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); // If this is an 8 or 16-bit value, it is really passed promoted // to 32 bits. Insert an assert[sz]ext to capture this, then @@ -722,7 +722,7 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) // Arguments are always 32-bit. int FI = MFI->CreateFixedObject(4, 0); MipsFI->recordStoreVarArgsFI(FI, -(4+(i*4))); - SDOperand PtrOff = DAG.getFrameIndex(FI, getPointerTy()); + SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy()); // emit ISD::STORE whichs stores the // parameter value to a stack Location @@ -748,7 +748,7 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) (FirstStackArgLoc + VA.getLocMemOffset()))); // Create load nodes to retrieve arguments from the stack - SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0)); } } @@ -762,7 +762,7 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32)); MipsFI->setSRetReturnReg(Reg); } - SDOperand Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]); + SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]); Root = DAG.getNode(ISD::TokenFactor, MVT::Other, Copy, Root); } @@ -777,8 +777,8 @@ LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) // Return Value Calling Convention Implementation //===----------------------------------------------------------------------===// -SDOperand MipsTargetLowering:: -LowerRET(SDOperand Op, SelectionDAG &DAG) +SDValue MipsTargetLowering:: +LowerRET(SDValue Op, SelectionDAG &DAG) { // CCValAssign - represent the assignment of // the return value to a location @@ -801,8 +801,8 @@ LowerRET(SDOperand Op, SelectionDAG &DAG) } // The chain is always operand #0 - SDOperand Chain = Op.getOperand(0); - SDOperand Flag; + SDValue Chain = Op.getOperand(0); + SDValue Flag; // Copy the result values into the output registers. 
for (unsigned i = 0; i != RVLocs.size(); ++i) { @@ -829,7 +829,7 @@ LowerRET(SDOperand Op, SelectionDAG &DAG) if (!Reg) assert(0 && "sret virtual register not created in the entry block"); - SDOperand Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy()); + SDValue Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy()); Chain = DAG.getCopyToReg(Chain, Mips::V0, Val, Flag); Flag = Chain.getValue(1); diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index 9c9a455fab..ccc798180b 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -66,36 +66,36 @@ namespace llvm { explicit MipsTargetLowering(MipsTargetMachine &TM); /// LowerOperation - Provide custom lowering hooks for some operations. - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); /// getTargetNodeName - This method returns the name of a target specific // DAG node. virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - get the ISD::SETCC result ValueType - MVT getSetCCResultType(const SDOperand &) const; + MVT getSetCCResultType(const SDValue &) const; private: // Subtarget Info const MipsSubtarget *Subtarget; // Lower Operand helpers - SDOperand LowerCCCArguments(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerCCCCallTo(SDOperand Op, SelectionDAG &DAG, unsigned CC); - SDNode *LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode*TheCall, + SDValue LowerCCCArguments(SDValue Op, SelectionDAG &DAG); + SDValue LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC); + SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, SDNode*TheCall, unsigned CallingConv, SelectionDAG &DAG); bool IsGlobalInSmallSection(GlobalValue *GV); bool IsInSmallSection(unsigned Size); // Lower Operand specifics - SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG); + SDValue LowerRET(SDValue Op, SelectionDAG &DAG); + SDValue LowerCALL(SDValue Op, SelectionDAG &DAG); + SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); + SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG); virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB); diff --git a/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp b/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp index 509ba83558..ba002bd0e3 100644 --- a/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp +++ b/lib/Target/PIC16/PIC16ISelDAGToDAG.cpp @@ -71,20 +71,20 @@ private: // Include the pieces autogenerated from the target description. #include "PIC16GenDAGISel.inc" - SDNode *Select(SDOperand N); + SDNode *Select(SDValue N); // Select addressing mode. currently assume base + offset addr mode. 
- bool SelectAM(SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset); - bool SelectDirectAM(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Offset); - bool StoreInDirectAM(SDOperand Op, SDOperand N, SDOperand &fsr); - bool LoadFSR(SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset); - bool LoadNothing(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Offset); + bool SelectAM(SDValue Op, SDValue N, SDValue &Base, SDValue &Offset); + bool SelectDirectAM(SDValue Op, SDValue N, SDValue &Base, + SDValue &Offset); + bool StoreInDirectAM(SDValue Op, SDValue N, SDValue &fsr); + bool LoadFSR(SDValue Op, SDValue N, SDValue &Base, SDValue &Offset); + bool LoadNothing(SDValue Op, SDValue N, SDValue &Base, + SDValue &Offset); // getI8Imm - Return a target constant with the specified // value, of type i8. - inline SDOperand getI8Imm(unsigned Imm) { + inline SDValue getI8Imm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, MVT::i8); } @@ -118,7 +118,7 @@ void PIC16DAGToDAGISel::InstructionSelect(SelectionDAG &SD) bool PIC16DAGToDAGISel:: -SelectDirectAM (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset) +SelectDirectAM (SDValue Op, SDValue N, SDValue &Base, SDValue &Offset) { GlobalAddressSDNode *GA; ConstantSDNode *GC; @@ -160,7 +160,7 @@ SelectDirectAM (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset) // FIXME: must also account for preinc/predec/postinc/postdec. bool PIC16DAGToDAGISel:: -StoreInDirectAM (SDOperand Op, SDOperand N, SDOperand &fsr) +StoreInDirectAM (SDValue Op, SDValue N, SDValue &fsr) { RegisterSDNode *Reg; if (N.getOpcode() == ISD::LOAD) { @@ -186,7 +186,7 @@ StoreInDirectAM (SDOperand Op, SDOperand N, SDOperand &fsr) } bool PIC16DAGToDAGISel:: -LoadFSR (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset) +LoadFSR (SDValue Op, SDValue N, SDValue &Base, SDValue &Offset) { GlobalAddressSDNode *GA; @@ -207,7 +207,7 @@ LoadFSR (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset) // LoadNothing - Don't thake this seriously, it will change. bool PIC16DAGToDAGISel:: -LoadNothing (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset) +LoadNothing (SDValue Op, SDValue N, SDValue &Base, SDValue &Offset) { GlobalAddressSDNode *GA; if (N.getOpcode() == ISD::GlobalAddress) { @@ -225,7 +225,7 @@ LoadNothing (SDOperand Op, SDOperand N, SDOperand &Base, SDOperand &Offset) /// Select - Select instructions not customized! Used for /// expanded, promoted and normal instructions. -SDNode* PIC16DAGToDAGISel::Select(SDOperand N) +SDNode* PIC16DAGToDAGISel::Select(SDValue N) { SDNode *Node = N.Val; unsigned Opcode = Node->getOpcode(); diff --git a/lib/Target/PIC16/PIC16ISelLowering.cpp b/lib/Target/PIC16/PIC16ISelLowering.cpp index 78936b8835..fe22f2ef8a 100644 --- a/lib/Target/PIC16/PIC16ISelLowering.cpp +++ b/lib/Target/PIC16/PIC16ISelLowering.cpp @@ -159,7 +159,7 @@ PIC16TargetLowering(PIC16TargetMachine &TM): TargetLowering(TM) } -SDOperand PIC16TargetLowering:: LowerOperation(SDOperand Op, SelectionDAG &DAG) +SDValue PIC16TargetLowering:: LowerOperation(SDValue Op, SelectionDAG &DAG) { SDVTList VTList16 = DAG.getVTList(MVT::i16, MVT::i16, MVT::Other); switch (Op.getOpcode()) { @@ -195,7 +195,7 @@ SDOperand PIC16TargetLowering:: LowerOperation(SDOperand Op, SelectionDAG &DAG) DOUT << "==== lowering BR_CC\n"; return LowerBR_CC(Op, DAG); } // end switch. 
- return SDOperand(); + return SDValue(); } @@ -203,21 +203,21 @@ SDOperand PIC16TargetLowering:: LowerOperation(SDOperand Op, SelectionDAG &DAG) // Lower helper functions //===----------------------------------------------------------------------===// -SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG) +SDValue PIC16TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); - SDOperand Chain = Op.getOperand(0); + SDValue Chain = Op.getOperand(0); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); - SDOperand LHS = Op.getOperand(2); - SDOperand RHS = Op.getOperand(3); - SDOperand JumpVal = Op.getOperand(4); - SDOperand Result; + SDValue LHS = Op.getOperand(2); + SDValue RHS = Op.getOperand(3); + SDValue JumpVal = Op.getOperand(4); + SDValue Result; unsigned cmpOpcode; unsigned branchOpcode; - SDOperand branchOperand; + SDValue branchOperand; - SDOperand StatusReg = DAG.getRegister(PIC16::STATUSREG, MVT::i8); - SDOperand CPUReg = DAG.getRegister(PIC16::WREG, MVT::i8); + SDValue StatusReg = DAG.getRegister(PIC16::STATUSREG, MVT::i8); + SDValue CPUReg = DAG.getRegister(PIC16::WREG, MVT::i8); switch(CC) { default: assert(0 && "This condition code is not handled yet!!"); @@ -263,7 +263,7 @@ SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG) } // End of Switch SDVTList VTList = DAG.getVTList(MVT::i8, MVT::Flag); - SDOperand CmpValue = DAG.getNode(cmpOpcode, VTList, LHS, RHS).getValue(1); + SDValue CmpValue = DAG.getNode(cmpOpcode, VTList, LHS, RHS).getValue(1); Result = DAG.getNode(branchOpcode, VT, Chain, JumpVal, branchOperand, StatusReg, CmpValue); return Result; @@ -276,23 +276,23 @@ SDOperand PIC16TargetLowering::LowerBR_CC(SDOperand Op, SelectionDAG &DAG) // LowerGlobalAddress - Create a constant pool entry for global value // and wrap it in a wrapper node. -SDOperand -PIC16TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) +SDValue +PIC16TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = getPointerTy(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); // FIXME: for now only do the ram. 
- SDOperand CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2); - SDOperand CPBank = DAG.getNode(PIC16ISD::SetBank, MVT::i8, CPAddr); + SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 2); + SDValue CPBank = DAG.getNode(PIC16ISD::SetBank, MVT::i8, CPAddr); CPAddr = DAG.getNode(PIC16ISD::Wrapper, MVT::i8, CPAddr,CPBank); return CPAddr; } -SDOperand -PIC16TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) +SDValue +PIC16TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { switch(Op.getNumOperands()) { default: @@ -300,12 +300,12 @@ PIC16TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) abort(); case 1: - return SDOperand(); // ret void is legal + return SDValue(); // ret void is legal } } -SDOperand -PIC16TargetLowering::LowerFrameIndex(SDOperand N, SelectionDAG &DAG) +SDValue +PIC16TargetLowering::LowerFrameIndex(SDValue N, SelectionDAG &DAG) { if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N)) { return DAG.getTargetFrameIndex(FIN->getIndex(), MVT::i32); @@ -314,17 +314,17 @@ PIC16TargetLowering::LowerFrameIndex(SDOperand N, SelectionDAG &DAG) return N; } -SDOperand +SDValue PIC16TargetLowering::LowerLOAD(SDNode *N, SelectionDAG &DAG, DAGCombinerInfo &DCI) const { - SDOperand Outs[2]; - SDOperand TF; //TokenFactor - SDOperand OutChains[2]; - SDOperand Chain = N->getOperand(0); - SDOperand Src = N->getOperand(1); - SDOperand retVal; + SDValue Outs[2]; + SDValue TF; //TokenFactor + SDValue OutChains[2]; + SDValue Chain = N->getOperand(0); + SDValue Src = N->getOperand(1); + SDValue retVal; SDVTList VTList; // If this load is directly stored, replace the load value with the stored @@ -332,7 +332,7 @@ PIC16TargetLowering::LowerLOAD(SDNode *N, // FIXME: Handle store large -> read small portion. // FIXME: Handle TRUNCSTORE/LOADEXT LoadSDNode *LD = cast<LoadSDNode>(N); - SDOperand Ptr = LD->getBasePtr(); + SDValue Ptr = LD->getBasePtr(); if (LD->getExtensionType() == ISD::NON_EXTLOAD) { if (ISD::isNON_TRUNCStore(Chain.Val)) { StoreSDNode *PrevST = cast<StoreSDNode>(Chain); @@ -343,9 +343,9 @@ PIC16TargetLowering::LowerLOAD(SDNode *N, } if (N->getValueType(0) != MVT::i16) - return SDOperand(); + return SDValue(); - SDOperand toWorklist; + SDValue toWorklist; Outs[0] = DAG.getLoad(MVT::i8, Chain, Src, NULL, 0); toWorklist = DAG.getNode(ISD::ADD, MVT::i16, Src, DAG.getConstant(1, MVT::i16)); @@ -367,17 +367,17 @@ PIC16TargetLowering::LowerLOAD(SDNode *N, return retVal; } -SDOperand +SDValue PIC16TargetLowering::LowerADDSUB(SDNode *N, SelectionDAG &DAG, DAGCombinerInfo &DCI) const { bool changed = false; int i; - SDOperand LoOps[3], HiOps[3]; - SDOperand OutOps[3]; // [0]:left, [1]:right, [2]:carry - SDOperand InOp[2]; - SDOperand retVal; - SDOperand as1,as2; + SDValue LoOps[3], HiOps[3]; + SDValue OutOps[3]; // [0]:left, [1]:right, [2]:carry + SDValue InOp[2]; + SDValue retVal; + SDValue as1,as2; SDVTList VTList; unsigned AS = 0, ASE = 0, ASC=0; @@ -427,7 +427,7 @@ PIC16TargetLowering::LowerADDSUB(SDNode *N, SelectionDAG &DAG, for (i = 0; i < 2; i++) { if (InOp[i].getOpcode() == ISD::GlobalAddress) { // We don't want to lower subs/adds with global address yet. - return SDOperand(); + return SDValue(); } else if (InOp[i].getOpcode() == ISD::Constant) { changed = true; @@ -443,7 +443,7 @@ PIC16TargetLowering::LowerADDSUB(SDNode *N, SelectionDAG &DAG, changed = true; // LowerLOAD returns a Package node or it may combine and return // anything else. 
- SDOperand lowered = LowerLOAD(InOp[i].Val, DAG, DCI); + SDValue lowered = LowerLOAD(InOp[i].Val, DAG, DCI); // So If LowerLOAD returns something other than Package, // then just call ADD again. @@ -462,7 +462,7 @@ PIC16TargetLowering::LowerADDSUB(SDNode *N, SelectionDAG &DAG, changed = true; // Must call LowerADDSUB recursively here, // LowerADDSUB returns a Package node. - SDOperand lowered = LowerADDSUB(InOp[i].Val, DAG, DCI); + SDValue lowered = LowerADDSUB(InOp[i].Val, DAG, DCI); LoOps[i] = lowered.getOperand(0); HiOps[i] = lowered.getOperand(1); @@ -533,11 +533,11 @@ PIC16TargetLowering::LowerADDSUB(SDNode *N, SelectionDAG &DAG, //===----------------------------------------------------------------------===// // FORMAL_ARGUMENTS Calling Convention Implementation //===----------------------------------------------------------------------===// -SDOperand PIC16TargetLowering:: -LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) +SDValue PIC16TargetLowering:: +LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { - SmallVector<SDOperand, 8> ArgValues; - SDOperand Root = Op.getOperand(0); + SmallVector<SDValue, 8> ArgValues; + SDValue Root = Op.getOperand(0); // Return the new list of results. // FIXME: Just copy right now. @@ -560,7 +560,7 @@ LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) // Target Optimization Hooks //===----------------------------------------------------------------------===// -SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, +SDValue PIC16TargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { int i; @@ -573,14 +573,14 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, case PIC16ISD::Package: DOUT << "==== combining PIC16ISD::Package\n"; - return SDOperand(); + return SDValue(); case ISD::ADD: case ISD::SUB: if ((N->getOperand(0).getOpcode() == ISD::GlobalAddress) || (N->getOperand(0).getOpcode() == ISD::FrameIndex)) { // Do not touch pointer adds. - return SDOperand (); + return SDValue (); } break; @@ -589,11 +589,11 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, case ISD::SUBE : case ISD::SUBC : if (N->getValueType(0) == MVT::i16) { - SDOperand retVal = LowerADDSUB(N, DAG,DCI); + SDValue retVal = LowerADDSUB(N, DAG,DCI); // LowerADDSUB has already combined the result, // so we just return nothing to avoid assertion failure from llvm // if N has been deleted already. - return SDOperand(); + return SDValue(); } else if (N->getValueType(0) == MVT::i8) { // Sanity check .... @@ -609,12 +609,12 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, // FIXME: split this large chunk of code. 
case ISD::STORE : { - SDOperand Chain = N->getOperand(0); - SDOperand Src = N->getOperand(1); - SDOperand Dest = N->getOperand(2); + SDValue Chain = N->getOperand(0); + SDValue Src = N->getOperand(1); + SDValue Dest = N->getOperand(2); unsigned int DstOff = 0; int NUM_STORES = 0; - SDOperand Stores[6]; + SDValue Stores[6]; // if source operand is expected to be extended to // some higher type then - remove this extension @@ -652,10 +652,10 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, //create direct addressing a = b Chain = Src.getOperand(0); for (i=0; i<NUM_STORES; i++) { - SDOperand ADN = DAG.getNode(ISD::ADD, MVT::i16, Src.getOperand(1), + SDValue ADN = DAG.getNode(ISD::ADD, MVT::i16, Src.getOperand(1), DAG.getConstant(DstOff, MVT::i16)); - SDOperand LDN = DAG.getLoad(MVT::i8, Chain, ADN, NULL, 0); - SDOperand DSTADDR = DAG.getNode(ISD::ADD, MVT::i16, Dest, + SDValue LDN = DAG.getLoad(MVT::i8, Chain, ADN, NULL, 0); + SDValue DSTADDR = DAG.getNode(ISD::ADD, MVT::i16, Dest, DAG.getConstant(DstOff, MVT::i16)); Stores[i] = DAG.getStore(Chain, LDN, DSTADDR, NULL, 0); Chain = Stores[i]; @@ -670,8 +670,8 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, //create direct addressing a = CONST CST = dyn_cast<ConstantSDNode>(Src); for (i = 0; i < NUM_STORES; i++) { - SDOperand CNST = DAG.getConstant(CST->getValue() >> i*8, MVT::i8); - SDOperand ADN = DAG.getNode(ISD::ADD, MVT::i16, Dest, + SDValue CNST = DAG.getConstant(CST->getValue() >> i*8, MVT::i8); + SDValue ADN = DAG.getNode(ISD::ADD, MVT::i16, Dest, DAG.getConstant(DstOff, MVT::i16)); Stores[i] = DAG.getStore(Chain, CNST, ADN, NULL, 0); Chain = Stores[i]; @@ -686,11 +686,11 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, // Create indirect addressing. CST = dyn_cast<ConstantSDNode>(Src); Chain = Dest.getOperand(0); - SDOperand Load; + SDValue Load; Load = DAG.getLoad(MVT::i16, Chain,Dest.getOperand(1), NULL, 0); Chain = Load.getValue(1); for (i=0; i<NUM_STORES; i++) { - SDOperand CNST = DAG.getConstant(CST->getValue() >> i*8, MVT::i8); + SDValue CNST = DAG.getConstant(CST->getValue() >> i*8, MVT::i8); Stores[i] = DAG.getStore(Chain, CNST, Load, NULL, 0); Chain = Stores[i]; DstOff += 1; @@ -701,11 +701,11 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, } else if (isa<LoadSDNode>(Dest) && isa<GlobalAddressSDNode>(Src)) { // GlobalAddressSDNode *GAD = dyn_cast<GlobalAddressSDNode>(Src); - return SDOperand(); + return SDValue(); } else if (Src.getOpcode() == PIC16ISD::Package) { StoreSDNode *st = dyn_cast<StoreSDNode>(N); - SDOperand toWorkList, retVal; + SDValue toWorkList, retVal; Chain = N->getOperand(0); if (st->isTruncatingStore()) { @@ -739,7 +739,7 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, case ISD::LOAD : { - SDOperand Ptr = N->getOperand(1); + SDValue Ptr = N->getOperand(1); if (Ptr.getOpcode() == PIC16ISD::Package) { assert (0 && "not implemented yet"); } @@ -747,20 +747,20 @@ SDOperand PIC16TargetLowering::PerformDAGCombine(SDNode *N, break; } // end switch. 
- return SDOperand(); + return SDValue(); } //===----------------------------------------------------------------------===// // Utility functions //===----------------------------------------------------------------------===// -const SDOperand *PIC16TargetLowering:: -findLoadi8(const SDOperand &Src, SelectionDAG &DAG) const +const SDValue *PIC16TargetLowering:: +findLoadi8(const SDValue &Src, SelectionDAG &DAG) const { unsigned int i; if ((Src.getOpcode() == ISD::LOAD) && (Src.getValueType() == MVT::i8)) return &Src; for (i=0; i<Src.getNumOperands(); i++) { - const SDOperand *retVal = findLoadi8(Src.getOperand(i),DAG); + const SDValue *retVal = findLoadi8(Src.getOperand(i),DAG); if (retVal) return retVal; } diff --git a/lib/Target/PIC16/PIC16ISelLowering.h b/lib/Target/PIC16/PIC16ISelLowering.h index 41650ee33e..c31d3444cd 100644 --- a/lib/Target/PIC16/PIC16ISelLowering.h +++ b/lib/Target/PIC16/PIC16ISelLowering.h @@ -64,28 +64,28 @@ namespace llvm { explicit PIC16TargetLowering(PIC16TargetMachine &TM); /// LowerOperation - Provide custom lowering hooks for some operations. - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); - SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFrameIndex(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG); + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerRET(SDValue Op, SelectionDAG &DAG); + SDValue LowerFrameIndex(SDValue Op, SelectionDAG &DAG); + SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG); - SDOperand RemoveHiLo(SDNode *, SelectionDAG &DAG, + SDValue RemoveHiLo(SDNode *, SelectionDAG &DAG, DAGCombinerInfo &DCI) const; - SDOperand LowerADDSUB(SDNode *, SelectionDAG &DAG, + SDValue LowerADDSUB(SDNode *, SelectionDAG &DAG, DAGCombinerInfo &DCI) const; - SDOperand LowerLOAD(SDNode *, SelectionDAG &DAG, + SDValue LowerLOAD(SDNode *, SelectionDAG &DAG, DAGCombinerInfo &DCI) const; /// getTargetNodeName - This method returns the name of a target specific // DAG node. virtual const char *getTargetNodeName(unsigned Opcode) const; - virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; // utility function. - const SDOperand *findLoadi8(const SDOperand &Src, SelectionDAG &DAG) const; + const SDValue *findLoadi8(const SDValue &Src, SelectionDAG &DAG) const; }; } // namespace llvm diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp index 717ce60e51..480e2772e1 100644 --- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp +++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp @@ -86,7 +86,7 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode, /// isLoadOfStoredAddress - If we have a load from the previously stored pointer /// as indicated by StorePtr1/StorePtr2/StoreSize, return true. bool PPCHazardRecognizer970:: -isLoadOfStoredAddress(unsigned LoadSize, SDOperand Ptr1, SDOperand Ptr2) const { +isLoadOfStoredAddress(unsigned LoadSize, SDValue Ptr1, SDValue Ptr2) const { for (unsigned i = 0, e = NumStores; i != e; ++i) { // Handle exact and commuted addresses. 
if (Ptr1 == StorePtr1[i] && Ptr2 == StorePtr2[i]) diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.h b/lib/Target/PowerPC/PPCHazardRecognizers.h index 8ec25aed61..7ba2e6e713 100644 --- a/lib/Target/PowerPC/PPCHazardRecognizers.h +++ b/lib/Target/PowerPC/PPCHazardRecognizers.h @@ -41,7 +41,7 @@ class PPCHazardRecognizer970 : public HazardRecognizer { // // This is null if we haven't seen a store yet. We keep track of both // operands of the store here, since we support [r+r] and [r+i] addressing. - SDOperand StorePtr1[4], StorePtr2[4]; + SDValue StorePtr1[4], StorePtr2[4]; unsigned StoreSize[4]; unsigned NumStores; @@ -64,7 +64,7 @@ private: bool &isLoad, bool &isStore); bool isLoadOfStoredAddress(unsigned LoadSize, - SDOperand Ptr1, SDOperand Ptr2) const; + SDValue Ptr1, SDValue Ptr2) const; }; } // end namespace llvm diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index fb27999646..65ab4d2a5b 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -61,18 +61,18 @@ namespace { /// getI32Imm - Return a target constant with the specified value, of type /// i32. - inline SDOperand getI32Imm(unsigned Imm) { + inline SDValue getI32Imm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, MVT::i32); } /// getI64Imm - Return a target constant with the specified value, of type /// i64. - inline SDOperand getI64Imm(uint64_t Imm) { + inline SDValue getI64Imm(uint64_t Imm) { return CurDAG->getTargetConstant(Imm, MVT::i64); } /// getSmallIPtrImm - Return a target constant of pointer type. - inline SDOperand getSmallIPtrImm(unsigned Imm) { + inline SDValue getSmallIPtrImm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, PPCLowering.getPointerTy()); } @@ -94,25 +94,25 @@ namespace { // Select - Convert the specified operand from a target-independent to a // target-specific node if it hasn't already been changed. - SDNode *Select(SDOperand Op); + SDNode *Select(SDValue Op); SDNode *SelectBitfieldInsert(SDNode *N); /// SelectCC - Select a comparison of the specified values with the /// specified condition code, returning the CR# of the expression. - SDOperand SelectCC(SDOperand LHS, SDOperand RHS, ISD::CondCode CC); + SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC); /// SelectAddrImm - Returns true if the address N can be represented by /// a base register plus a signed 16-bit displacement [r+imm]. - bool SelectAddrImm(SDOperand Op, SDOperand N, SDOperand &Disp, - SDOperand &Base) { + bool SelectAddrImm(SDValue Op, SDValue N, SDValue &Disp, + SDValue &Base) { return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG); } /// SelectAddrImmOffs - Return true if the operand is valid for a preinc /// immediate field. Because preinc imms have already been validated, just /// accept it. - bool SelectAddrImmOffs(SDOperand Op, SDOperand N, SDOperand &Out) const { + bool SelectAddrImmOffs(SDValue Op, SDValue N, SDValue &Out) const { Out = N; return true; } @@ -120,33 +120,33 @@ namespace { /// SelectAddrIdx - Given the specified addressed, check to see if it can be /// represented as an indexed [r+r] operation. Returns false if it can /// be represented by [r+imm], which are preferred. 
- bool SelectAddrIdx(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index) { + bool SelectAddrIdx(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index) { return PPCLowering.SelectAddressRegReg(N, Base, Index, *CurDAG); } /// SelectAddrIdxOnly - Given the specified addressed, force it to be /// represented as an indexed [r+r] operation. - bool SelectAddrIdxOnly(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Index) { + bool SelectAddrIdxOnly(SDValue Op, SDValue N, SDValue &Base, + SDValue &Index) { return PPCLowering.SelectAddressRegRegOnly(N, Base, Index, *CurDAG); } /// SelectAddrImmShift - Returns true if the address N can be represented by /// a base register plus a signed 14-bit displacement [r+imm*4]. Suitable /// for use by STD and friends. - bool SelectAddrImmShift(SDOperand Op, SDOperand N, SDOperand &Disp, - SDOperand &Base) { + bool SelectAddrImmShift(SDValue Op, SDValue N, SDValue &Disp, + SDValue &Base) { return PPCLowering.SelectAddressRegImmShift(N, Disp, Base, *CurDAG); } /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for /// inline asm expressions. - virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op, + virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, - std::vector<SDOperand> &OutOps, + std::vector<SDValue> &OutOps, SelectionDAG &DAG) { - SDOperand Op0, Op1; + SDValue Op0, Op1; switch (ConstraintCode) { default: return true; case 'm': // memory @@ -170,8 +170,8 @@ namespace { return false; } - SDOperand BuildSDIVSequence(SDNode *N); - SDOperand BuildUDIVSequence(SDNode *N); + SDValue BuildSDIVSequence(SDNode *N); + SDValue BuildUDIVSequence(SDNode *N); /// InstructionSelect - This callback is invoked by /// SelectionDAGISel when it has created a SelectionDAG for us to codegen. @@ -197,7 +197,7 @@ namespace { #include "PPCGenDAGISel.inc" private: - SDNode *SelectSETCC(SDOperand Op); + SDNode *SelectSETCC(SDValue Op); }; } @@ -313,7 +313,7 @@ static bool isIntS16Immediate(SDNode *N, short &Imm) { return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue(); } -static bool isIntS16Immediate(SDOperand Op, short &Imm) { +static bool isIntS16Immediate(SDValue Op, short &Imm) { return isIntS16Immediate(Op.Val, Imm); } @@ -340,7 +340,7 @@ static bool isInt64Immediate(SDNode *N, uint64_t &Imm) { // isInt32Immediate - This method tests to see if a constant operand. // If so Imm will receive the 32 bit value. -static bool isInt32Immediate(SDOperand N, unsigned &Imm) { +static bool isInt32Immediate(SDValue N, unsigned &Imm) { return isInt32Immediate(N.Val, Imm); } @@ -418,8 +418,8 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask, /// SelectBitfieldInsert - turn an or of two masked values into /// the rotate left word immediate then mask insert (rlwimi) instruction. 
SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { - SDOperand Op0 = N->getOperand(0); - SDOperand Op1 = N->getOperand(1); + SDValue Op0 = N->getOperand(0); + SDValue Op1 = N->getOperand(1); APInt LKZ, LKO, RKZ, RKO; CurDAG->ComputeMaskedBits(Op0, APInt::getAllOnesValue(32), LKZ, LKO); @@ -458,7 +458,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { unsigned MB, ME; if (InsertMask && isRunOfOnes(InsertMask, MB, ME)) { - SDOperand Tmp1, Tmp2, Tmp3; + SDValue Tmp1, Tmp2, Tmp3; bool DisjointMask = (TargetMask ^ InsertMask) == 0xFFFFFFFF; if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) && @@ -481,7 +481,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { AddToISelQueue(Tmp3); AddToISelQueue(Op1); SH &= 31; - SDOperand Ops[] = { Tmp3, Op1, getI32Imm(SH), getI32Imm(MB), + SDValue Ops[] = { Tmp3, Op1, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) }; return CurDAG->getTargetNode(PPC::RLWIMI, MVT::i32, Ops, 5); } @@ -491,7 +491,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) { /// SelectCC - Select a comparison of the specified values with the specified /// condition code, returning the CR# of the expression. -SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS, +SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC) { // Always select the LHS. AddToISelQueue(LHS); @@ -503,11 +503,11 @@ SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS, if (isInt32Immediate(RHS, Imm)) { // SETEQ/SETNE comparison with 16-bit immediate, fold it. if (isUInt16(Imm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPLWI, MVT::i32, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, MVT::i32, LHS, getI32Imm(Imm & 0xFFFF)), 0); // If this is a 16-bit signed immediate, fold it. if (isInt16((int)Imm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPWI, MVT::i32, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPWI, MVT::i32, LHS, getI32Imm(Imm & 0xFFFF)), 0); // For non-equality comparisons, the default code would materialize the @@ -519,21 +519,21 @@ SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS, // xoris r0,r3,0x1234 // cmplwi cr0,r0,0x5678 // beq cr0,L6 - SDOperand Xor(CurDAG->getTargetNode(PPC::XORIS, MVT::i32, LHS, + SDValue Xor(CurDAG->getTargetNode(PPC::XORIS, MVT::i32, LHS, getI32Imm(Imm >> 16)), 0); - return SDOperand(CurDAG->getTargetNode(PPC::CMPLWI, MVT::i32, Xor, + return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, MVT::i32, Xor, getI32Imm(Imm & 0xFFFF)), 0); } Opc = PPC::CMPLW; } else if (ISD::isUnsignedIntSetCC(CC)) { if (isInt32Immediate(RHS, Imm) && isUInt16(Imm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPLWI, MVT::i32, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, MVT::i32, LHS, getI32Imm(Imm & 0xFFFF)), 0); Opc = PPC::CMPLW; } else { short SImm; if (isIntS16Immediate(RHS, SImm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPWI, MVT::i32, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPWI, MVT::i32, LHS, getI32Imm((int)SImm & 0xFFFF)), 0); Opc = PPC::CMPW; @@ -544,11 +544,11 @@ SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS, if (isInt64Immediate(RHS.Val, Imm)) { // SETEQ/SETNE comparison with 16-bit immediate, fold it. if (isUInt16(Imm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPLDI, MVT::i64, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, MVT::i64, LHS, getI32Imm(Imm & 0xFFFF)), 0); // If this is a 16-bit signed immediate, fold it. 
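The SETEQ/SETNE paths above avoid materializing a full 32-bit constant with lis/ori: the high half is folded into an xoris and the low half into the cmplwi immediate, exactly as the assembly in the comment shows. Because XOR is invertible the rewrite preserves the comparison; a standalone check:

```cpp
#include <cstdint>
#include <cassert>

// Equality against a full 32-bit immediate without materializing it:
// (x ^ hi) == lo holds exactly when x == (hi | lo), since XOR is invertible.
static bool eqViaXorisCmplwi(uint32_t X, uint32_t Imm) {
  uint32_t Hi = Imm & 0xFFFF0000u;   // what the xoris applies
  uint32_t Lo = Imm & 0x0000FFFFu;   // what the cmplwi compares against
  return (X ^ Hi) == Lo;
}

int main() {
  const uint32_t Imm = 0x12345678u;
  assert(eqViaXorisCmplwi(Imm, Imm));
  assert(!eqViaXorisCmplwi(Imm + 1, Imm));
  assert(!eqViaXorisCmplwi(Imm ^ 0x00010000u, Imm));  // differs only in the high half
}
```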
if (isInt16(Imm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPDI, MVT::i64, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPDI, MVT::i64, LHS, getI32Imm(Imm & 0xFFFF)), 0); // For non-equality comparisons, the default code would materialize the @@ -561,22 +561,22 @@ SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS, // cmpldi cr0,r0,0x5678 // beq cr0,L6 if (isUInt32(Imm)) { - SDOperand Xor(CurDAG->getTargetNode(PPC::XORIS8, MVT::i64, LHS, + SDValue Xor(CurDAG->getTargetNode(PPC::XORIS8, MVT::i64, LHS, getI64Imm(Imm >> 16)), 0); - return SDOperand(CurDAG->getTargetNode(PPC::CMPLDI, MVT::i64, Xor, + return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, MVT::i64, Xor, getI64Imm(Imm & 0xFFFF)), 0); } } Opc = PPC::CMPLD; } else if (ISD::isUnsignedIntSetCC(CC)) { if (isInt64Immediate(RHS.Val, Imm) && isUInt16(Imm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPLDI, MVT::i64, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, MVT::i64, LHS, getI64Imm(Imm & 0xFFFF)), 0); Opc = PPC::CMPLD; } else { short SImm; if (isIntS16Immediate(RHS, SImm)) - return SDOperand(CurDAG->getTargetNode(PPC::CMPDI, MVT::i64, LHS, + return SDValue(CurDAG->getTargetNode(PPC::CMPDI, MVT::i64, LHS, getI64Imm(SImm & 0xFFFF)), 0); Opc = PPC::CMPD; @@ -588,7 +588,7 @@ SDOperand PPCDAGToDAGISel::SelectCC(SDOperand LHS, SDOperand RHS, Opc = PPC::FCMPUD; } AddToISelQueue(RHS); - return SDOperand(CurDAG->getTargetNode(Opc, MVT::i32, LHS, RHS), 0); + return SDValue(CurDAG->getTargetNode(Opc, MVT::i32, LHS, RHS), 0); } static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC) { @@ -653,7 +653,7 @@ static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) { return 0; } -SDNode *PPCDAGToDAGISel::SelectSETCC(SDOperand Op) { +SDNode *PPCDAGToDAGISel::SelectSETCC(SDValue Op) { SDNode *N = Op.Val; unsigned Imm; ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); @@ -662,64 +662,64 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDOperand Op) { // Check for those cases here. 
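The first special case handled below selects seteq X, 0 into cntlzw followed by rlwinm rather than a compare and branch: the leading-zero count is 32 only for zero, and shifting it right by five bits yields exactly the 0/1 result. A portable model of that trick (clz32 here is a stand-in, not an LLVM helper):

```cpp
#include <cstdint>
#include <cassert>

// Portable count-leading-zeros for 32 bits (returns 32 for zero), standing in
// for the cntlzw instruction.
static unsigned clz32(uint32_t X) {
  unsigned N = 0;
  while (N < 32 && !(X & 0x80000000u)) { X <<= 1; ++N; }
  return N;
}

// (x == 0) computed branchlessly: only zero produces a count of 32, and
// 32 >> 5 == 1 while every smaller count shifts down to 0. The rlwinm with
// SH=27, MB=5, ME=31 in the selector is exactly this shift right by 5.
static uint32_t isZeroBranchless(uint32_t X) {
  return clz32(X) >> 5;
}

int main() {
  assert(isZeroBranchless(0) == 1);
  assert(isZeroBranchless(1) == 0);
  assert(isZeroBranchless(0x80000000u) == 0);
}
```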
// setcc op, 0 if (Imm == 0) { - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); AddToISelQueue(Op); switch (CC) { default: break; case ISD::SETEQ: { - Op = SDOperand(CurDAG->getTargetNode(PPC::CNTLZW, MVT::i32, Op), 0); - SDOperand Ops[] = { Op, getI32Imm(27), getI32Imm(5), getI32Imm(31) }; + Op = SDValue(CurDAG->getTargetNode(PPC::CNTLZW, MVT::i32, Op), 0); + SDValue Ops[] = { Op, getI32Imm(27), getI32Imm(5), getI32Imm(31) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } case ISD::SETNE: { - SDOperand AD = - SDOperand(CurDAG->getTargetNode(PPC::ADDIC, MVT::i32, MVT::Flag, + SDValue AD = + SDValue(CurDAG->getTargetNode(PPC::ADDIC, MVT::i32, MVT::Flag, Op, getI32Imm(~0U)), 0); return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op, AD.getValue(1)); } case ISD::SETLT: { - SDOperand Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; + SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } case ISD::SETGT: { - SDOperand T = - SDOperand(CurDAG->getTargetNode(PPC::NEG, MVT::i32, Op), 0); - T = SDOperand(CurDAG->getTargetNode(PPC::ANDC, MVT::i32, T, Op), 0); - SDOperand Ops[] = { T, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; + SDValue T = + SDValue(CurDAG->getTargetNode(PPC::NEG, MVT::i32, Op), 0); + T = SDValue(CurDAG->getTargetNode(PPC::ANDC, MVT::i32, T, Op), 0); + SDValue Ops[] = { T, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } } } else if (Imm == ~0U) { // setcc op, -1 - SDOperand Op = N->getOperand(0); + SDValue Op = N->getOperand(0); AddToISelQueue(Op); switch (CC) { default: break; case ISD::SETEQ: - Op = SDOperand(CurDAG->getTargetNode(PPC::ADDIC, MVT::i32, MVT::Flag, + Op = SDValue(CurDAG->getTargetNode(PPC::ADDIC, MVT::i32, MVT::Flag, Op, getI32Imm(1)), 0); return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, - SDOperand(CurDAG->getTargetNode(PPC::LI, MVT::i32, + SDValue(CurDAG->getTargetNode(PPC::LI, MVT::i32, getI32Imm(0)), 0), Op.getValue(1)); case ISD::SETNE: { - Op = SDOperand(CurDAG->getTargetNode(PPC::NOR, MVT::i32, Op, Op), 0); + Op = SDValue(CurDAG->getTargetNode(PPC::NOR, MVT::i32, Op, Op), 0); SDNode *AD = CurDAG->getTargetNode(PPC::ADDIC, MVT::i32, MVT::Flag, Op, getI32Imm(~0U)); - return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDOperand(AD, 0), - Op, SDOperand(AD, 1)); + return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0), + Op, SDValue(AD, 1)); } case ISD::SETLT: { - SDOperand AD = SDOperand(CurDAG->getTargetNode(PPC::ADDI, MVT::i32, Op, + SDValue AD = SDValue(CurDAG->getTargetNode(PPC::ADDI, MVT::i32, Op, getI32Imm(1)), 0); - SDOperand AN = SDOperand(CurDAG->getTargetNode(PPC::AND, MVT::i32, AD, + SDValue AN = SDValue(CurDAG->getTargetNode(PPC::AND, MVT::i32, AD, Op), 0); - SDOperand Ops[] = { AN, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; + SDValue Ops[] = { AN, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } case ISD::SETGT: { - SDOperand Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; - Op = SDOperand(CurDAG->getTargetNode(PPC::RLWINM, MVT::i32, Ops, 4), 0); + SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) }; + Op = SDValue(CurDAG->getTargetNode(PPC::RLWINM, MVT::i32, Ops, 4), 0); return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op, getI32Imm(1)); } @@ -730,30 +730,30 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDOperand Op) { bool Inv; int OtherCondIdx; 
unsigned Idx = getCRIdxForSetCC(CC, Inv, OtherCondIdx); - SDOperand CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC); - SDOperand IntCR; + SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC); + SDValue IntCR; // Force the ccreg into CR7. - SDOperand CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32); + SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32); - SDOperand InFlag(0, 0); // Null incoming flag value. + SDValue InFlag(0, 0); // Null incoming flag value. CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), CR7Reg, CCReg, InFlag).getValue(1); if (PPCSubTarget.isGigaProcessor() && OtherCondIdx == -1) - IntCR = SDOperand(CurDAG->getTargetNode(PPC::MFOCRF, MVT::i32, CR7Reg, + IntCR = SDValue(CurDAG->getTargetNode(PPC::MFOCRF, MVT::i32, CR7Reg, CCReg), 0); else - IntCR = SDOperand(CurDAG->getTargetNode(PPC::MFCR, MVT::i32, CCReg), 0); + IntCR = SDValue(CurDAG->getTargetNode(PPC::MFCR, MVT::i32, CCReg), 0); - SDOperand Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31), + SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31), getI32Imm(31), getI32Imm(31) }; if (OtherCondIdx == -1 && !Inv) return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); // Get the specified bit. - SDOperand Tmp = - SDOperand(CurDAG->getTargetNode(PPC::RLWINM, MVT::i32, Ops, 4), 0); + SDValue Tmp = + SDValue(CurDAG->getTargetNode(PPC::RLWINM, MVT::i32, Ops, 4), 0); if (Inv) { assert(OtherCondIdx == -1 && "Can't have split plus negation"); return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1)); @@ -764,8 +764,8 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDOperand Op) { // Get the other bit of the comparison. Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31); - SDOperand OtherCond = - SDOperand(CurDAG->getTargetNode(PPC::RLWINM, MVT::i32, Ops, 4), 0); + SDValue OtherCond = + SDValue(CurDAG->getTargetNode(PPC::RLWINM, MVT::i32, Ops, 4), 0); return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond); } @@ -773,7 +773,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDOperand Op) { // Select - Convert the specified operand from a target-independent to a // target-specific node if it hasn't already been changed. -SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { +SDNode *PPCDAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.Val; if (N->isMachineOpcode()) return NULL; // Already selected. @@ -824,7 +824,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { Result = CurDAG->getTargetNode(OpC, MVT::i64, getI32Imm(Hi)); // And Lo bits. Result = CurDAG->getTargetNode(PPC::ORI8, MVT::i64, - SDOperand(Result, 0), getI32Imm(Lo)); + SDValue(Result, 0), getI32Imm(Lo)); } else { // Just the Hi bits. Result = CurDAG->getTargetNode(PPC::LIS8, MVT::i64, getI32Imm(Hi)); @@ -836,18 +836,18 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { // Shift for next step if the upper 32-bits were not zero. if (Imm) { Result = CurDAG->getTargetNode(PPC::RLDICR, MVT::i64, - SDOperand(Result, 0), + SDValue(Result, 0), getI32Imm(Shift), getI32Imm(63 - Shift)); } // Add in the last bits as required. 
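The i64 Constant case being selected here assembles the value 16 bits at a time: lis/ori build the high word, rldicr slides it into the upper half, and oris/ori fill in the low word. A runnable model of that reassembly, with the instruction semantics paraphrased (the real selector additionally skips pieces that are zero and chooses LI8/LIS8 variants):

```cpp
#include <cstdint>
#include <cassert>

// Rough architectural models of the instructions strung together here;
// these are not LLVM node types.
static uint64_t lis(uint16_t Imm)   { return (uint64_t)(int64_t)(int32_t)((uint32_t)Imm << 16); }
static uint64_t ori(uint64_t R, uint16_t Imm)  { return R | Imm; }
static uint64_t oris(uint64_t R, uint16_t Imm) { return R | ((uint32_t)Imm << 16); }
// rldicr R,32,31: rotate left by 32 and keep only the upper 32 bits.
static uint64_t rldicrShift32(uint64_t R)      { return (uint64_t)(uint32_t)R << 32; }

// Rebuild a 64-bit constant with a non-trivial upper half the way the
// selector does: lis/ori for the high word, rldicr to move it up, then
// oris/ori to fill in the low word.
static uint64_t materializeI64(uint64_t Imm) {
  uint32_t HiWord = (uint32_t)(Imm >> 32), LoWord = (uint32_t)Imm;
  uint64_t R = lis((uint16_t)(HiWord >> 16));
  if (uint16_t Lo = (uint16_t)HiWord)         R = ori(R, Lo);
  R = rldicrShift32(R);
  if (uint16_t Hi = (uint16_t)(LoWord >> 16)) R = oris(R, Hi);
  if (uint16_t Lo = (uint16_t)LoWord)         R = ori(R, Lo);
  return R;
}

int main() {
  assert(materializeI64(0x123456789ABCDEF0ull) == 0x123456789ABCDEF0ull);
  assert(materializeI64(0xFFFF0000DEADBEEFull) == 0xFFFF0000DEADBEEFull);
}
```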
if ((Hi = (Remainder >> 16) & 0xFFFF)) { Result = CurDAG->getTargetNode(PPC::ORIS8, MVT::i64, - SDOperand(Result, 0), getI32Imm(Hi)); + SDValue(Result, 0), getI32Imm(Hi)); } if ((Lo = Remainder & 0xFFFF)) { Result = CurDAG->getTargetNode(PPC::ORI8, MVT::i64, - SDOperand(Result, 0), getI32Imm(Lo)); + SDValue(Result, 0), getI32Imm(Lo)); } return Result; @@ -862,7 +862,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { case ISD::FrameIndex: { int FI = cast<FrameIndexSDNode>(N)->getIndex(); - SDOperand TFI = CurDAG->getTargetFrameIndex(FI, Op.getValueType()); + SDValue TFI = CurDAG->getTargetFrameIndex(FI, Op.getValueType()); unsigned Opc = Op.getValueType() == MVT::i32 ? PPC::ADDI : PPC::ADDI8; if (N->hasOneUse()) return CurDAG->SelectNodeTo(N, Opc, Op.getValueType(), TFI, @@ -872,7 +872,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { } case PPCISD::MFCR: { - SDOperand InFlag = N->getOperand(1); + SDValue InFlag = N->getOperand(1); AddToISelQueue(InFlag); // Use MFOCRF if supported. if (PPCSubTarget.isGigaProcessor()) @@ -890,21 +890,21 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { // sra/addze rather than having to handle sdiv ourselves. oh well. unsigned Imm; if (isInt32Immediate(N->getOperand(1), Imm)) { - SDOperand N0 = N->getOperand(0); + SDValue N0 = N->getOperand(0); AddToISelQueue(N0); if ((signed)Imm > 0 && isPowerOf2_32(Imm)) { SDNode *Op = CurDAG->getTargetNode(PPC::SRAWI, MVT::i32, MVT::Flag, N0, getI32Imm(Log2_32(Imm))); return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, - SDOperand(Op, 0), SDOperand(Op, 1)); + SDValue(Op, 0), SDValue(Op, 1)); } else if ((signed)Imm < 0 && isPowerOf2_32(-Imm)) { SDNode *Op = CurDAG->getTargetNode(PPC::SRAWI, MVT::i32, MVT::Flag, N0, getI32Imm(Log2_32(-Imm))); - SDOperand PT = - SDOperand(CurDAG->getTargetNode(PPC::ADDZE, MVT::i32, - SDOperand(Op, 0), SDOperand(Op, 1)), + SDValue PT = + SDValue(CurDAG->getTargetNode(PPC::ADDZE, MVT::i32, + SDValue(Op, 0), SDValue(Op, 1)), 0); return CurDAG->SelectNodeTo(N, PPC::NEG, MVT::i32, PT); } @@ -923,7 +923,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { if (LD->getAddressingMode() != ISD::PRE_INC) break; - SDOperand Offset = LD->getOffset(); + SDValue Offset = LD->getOffset(); if (isa<ConstantSDNode>(Offset) || Offset.getOpcode() == ISD::TargetGlobalAddress) { @@ -954,12 +954,12 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { } } - SDOperand Chain = LD->getChain(); - SDOperand Base = LD->getBasePtr(); + SDValue Chain = LD->getChain(); + SDValue Base = LD->getBasePtr(); AddToISelQueue(Chain); AddToISelQueue(Base); AddToISelQueue(Offset); - SDOperand Ops[] = { Offset, Base, Chain }; + SDValue Ops[] = { Offset, Base, Chain }; // FIXME: PPC64 return CurDAG->getTargetNode(Opcode, LD->getValueType(0), PPCLowering.getPointerTy(), @@ -976,9 +976,9 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { // with a mask, emit rlwinm if (isInt32Immediate(N->getOperand(1), Imm) && isRotateAndMask(N->getOperand(0).Val, Imm, false, SH, MB, ME)) { - SDOperand Val = N->getOperand(0).getOperand(0); + SDValue Val = N->getOperand(0).getOperand(0); AddToISelQueue(Val); - SDOperand Ops[] = { Val, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) }; + SDValue Ops[] = { Val, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } // If this is just a masked value where the input is not handled above, and @@ -986,15 +986,15 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { if (isInt32Immediate(N->getOperand(1), Imm) && isRunOfOnes(Imm, MB, ME) && 
N->getOperand(0).getOpcode() != ISD::ROTL) { - SDOperand Val = N->getOperand(0); + SDValue Val = N->getOperand(0); AddToISelQueue(Val); - SDOperand Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) }; + SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } // AND X, 0 -> 0, not "rlwinm 32". if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) { AddToISelQueue(N->getOperand(1)); - ReplaceUses(SDOperand(N, 0), N->getOperand(1)); + ReplaceUses(SDValue(N, 0), N->getOperand(1)); return NULL; } // ISD::OR doesn't get all the bitfield insertion fun. @@ -1007,7 +1007,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { if (isRunOfOnes(Imm, MB, ME)) { AddToISelQueue(N->getOperand(0).getOperand(0)); AddToISelQueue(N->getOperand(0).getOperand(1)); - SDOperand Ops[] = { N->getOperand(0).getOperand(0), + SDValue Ops[] = { N->getOperand(0).getOperand(0), N->getOperand(0).getOperand(1), getI32Imm(0), getI32Imm(MB),getI32Imm(ME) }; return CurDAG->getTargetNode(PPC::RLWIMI, MVT::i32, Ops, 5); @@ -1029,7 +1029,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { if (isOpcWithIntImmediate(N->getOperand(0).Val, ISD::AND, Imm) && isRotateAndMask(N, Imm, true, SH, MB, ME)) { AddToISelQueue(N->getOperand(0).getOperand(0)); - SDOperand Ops[] = { N->getOperand(0).getOperand(0), + SDValue Ops[] = { N->getOperand(0).getOperand(0), getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } @@ -1042,7 +1042,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { if (isOpcWithIntImmediate(N->getOperand(0).Val, ISD::AND, Imm) && isRotateAndMask(N, Imm, true, SH, MB, ME)) { AddToISelQueue(N->getOperand(0).getOperand(0)); - SDOperand Ops[] = { N->getOperand(0).getOperand(0), + SDValue Ops[] = { N->getOperand(0).getOperand(0), getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } @@ -1066,11 +1066,11 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { CurDAG->getTargetNode(PPC::ADDIC, MVT::i32, MVT::Flag, N->getOperand(0), getI32Imm(~0U)); return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, - SDOperand(Tmp, 0), N->getOperand(0), - SDOperand(Tmp, 1)); + SDValue(Tmp, 0), N->getOperand(0), + SDValue(Tmp, 1)); } - SDOperand CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC); + SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC); unsigned BROpc = getPredicateForSetCC(CC); unsigned SelectCCOp; @@ -1087,7 +1087,7 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { AddToISelQueue(N->getOperand(2)); AddToISelQueue(N->getOperand(3)); - SDOperand Ops[] = { CCReg, N->getOperand(2), N->getOperand(3), + SDValue Ops[] = { CCReg, N->getOperand(2), N->getOperand(3), getI32Imm(BROpc) }; return CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops, 4); } @@ -1098,28 +1098,28 @@ SDNode *PPCDAGToDAGISel::Select(SDOperand Op) { // Op #3 is the Dest MBB AddToISelQueue(N->getOperand(4)); // Op #4 is the Flag. // Prevent PPC::PRED_* from being selected into LI. 
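Several of the folds above (AND with a contiguous mask, SHL/SRL feeding an AND) collapse into a single rlwinm because the instruction is rotate-left-then-mask: a left or right logical shift is just a rotate whose unwanted bits the MB/ME mask removes. A small model of those semantics (non-wrapping masks only):

```cpp
#include <cstdint>
#include <cassert>

static uint32_t rotl32(uint32_t X, unsigned N) {
  N &= 31;
  return N ? (X << N) | (X >> (32 - N)) : X;
}

// Mask with big-endian PowerPC bits MB..ME set (bit 0 is the MSB).
// Assumes MB <= ME; hardware masks may also wrap, which is omitted here.
static uint32_t maskMBME(unsigned MB, unsigned ME) {
  return (0xFFFFFFFFu >> MB) & (0xFFFFFFFFu << (31 - ME));
}

// rlwinm RA,RS,SH,MB,ME == rotate left by SH, then AND with the MB..ME mask.
static uint32_t rlwinm(uint32_t RS, unsigned SH, unsigned MB, unsigned ME) {
  return rotl32(RS, SH) & maskMBME(MB, ME);
}

int main() {
  uint32_t X = 0x12345678u;
  assert(rlwinm(X, 0, 8, 23) == (X & 0x00FFFF00u));         // pure mask
  assert(rlwinm(X, 4, 0, 27) == ((X << 4) & 0xFFFFFFF0u));  // shl 4 folded in
  assert(rlwinm(X, 32 - 4, 4, 31) == (X >> 4));             // srl 4 folded in
}
```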
- SDOperand Pred = + SDValue Pred = getI32Imm(cast<ConstantSDNode>(N->getOperand(1))->getValue()); - SDOperand Ops[] = { Pred, N->getOperand(2), N->getOperand(3), + SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3), N->getOperand(0), N->getOperand(4) }; return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 5); } case ISD::BR_CC: { AddToISelQueue(N->getOperand(0)); ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); - SDOperand CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC); - SDOperand Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode, + SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC); + SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode, N->getOperand(4), N->getOperand(0) }; return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 4); } case ISD::BRIND: { // FIXME: Should custom lower this. - SDOperand Chain = N->getOperand(0); - SDOperand Target = N->getOperand(1); + SDValue Chain = N->getOperand(0); + SDValue Target = N->getOperand(1); AddToISelQueue(Chain); AddToISelQueue(Target); unsigned Opc = Target.getValueType() == MVT::i32 ? PPC::MTCTR : PPC::MTCTR8; - Chain = SDOperand(CurDAG->getTargetNode(Opc, MVT::Other, Target, + Chain = SDValue(CurDAG->getTargetNode(Opc, MVT::Other, Target, Chain), 0); return CurDAG->SelectNodeTo(N, PPC::BCTR, MVT::Other, Chain); } diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index e8e16591b1..1fe2e8c0ce 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -422,7 +422,7 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { } -MVT PPCTargetLowering::getSetCCResultType(const SDOperand &) const { +MVT PPCTargetLowering::getSetCCResultType(const SDValue &) const { return MVT::i32; } @@ -432,7 +432,7 @@ MVT PPCTargetLowering::getSetCCResultType(const SDOperand &) const { //===----------------------------------------------------------------------===// /// isFloatingPointZero - Return true if this is 0.0 or -0.0. -static bool isFloatingPointZero(SDOperand Op) { +static bool isFloatingPointZero(SDValue Op) { if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) return CFP->getValueAPF().isZero(); else if (ISD::isEXTLoad(Op.Val) || ISD::isNON_EXTLoad(Op.Val)) { @@ -446,7 +446,7 @@ static bool isFloatingPointZero(SDOperand Op) { /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return /// true if Op is undef or if it matches the specified value. -static bool isConstantOrUndef(SDOperand Op, unsigned Val) { +static bool isConstantOrUndef(SDValue Op, unsigned Val) { return Op.getOpcode() == ISD::UNDEF || cast<ConstantSDNode>(Op)->getValue() == Val; } @@ -567,7 +567,7 @@ bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) { // This is a splat operation if each element of the permute is the same, and // if the value doesn't reference the second vector. unsigned ElementBase = 0; - SDOperand Elt = N->getOperand(0); + SDValue Elt = N->getOperand(0); if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt)) ElementBase = EltV->getValue(); else @@ -617,8 +617,8 @@ unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) { /// by using a vspltis[bhw] instruction of the specified element size, return /// the constant being splatted. The ByteSize field indicates the number of /// bytes of each element [124] -> [bhw]. 
-SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { - SDOperand OpVal(0, 0); +SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { + SDValue OpVal(0, 0); // If ByteSize of the splat is bigger than the element size of the // build_vector, then we have a case where we are checking for a splat where @@ -627,20 +627,20 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { unsigned EltSize = 16/N->getNumOperands(); if (EltSize < ByteSize) { unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval. - SDOperand UniquedVals[4]; + SDValue UniquedVals[4]; assert(Multiple > 1 && Multiple <= 4 && "How can this happen?"); // See if all of the elements in the buildvector agree across. for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue; // If the element isn't a constant, bail fully out. - if (!isa<ConstantSDNode>(N->getOperand(i))) return SDOperand(); + if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue(); if (UniquedVals[i&(Multiple-1)].Val == 0) UniquedVals[i&(Multiple-1)] = N->getOperand(i); else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i)) - return SDOperand(); // no match. + return SDValue(); // no match. } // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains @@ -673,7 +673,7 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { return DAG.getTargetConstant(Val, MVT::i32); } - return SDOperand(); + return SDValue(); } // Check to see if this buildvec has a single non-undef value in its elements. @@ -682,10 +682,10 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { if (OpVal.Val == 0) OpVal = N->getOperand(i); else if (OpVal != N->getOperand(i)) - return SDOperand(); + return SDValue(); } - if (OpVal.Val == 0) return SDOperand(); // All UNDEF: use implicit def. + if (OpVal.Val == 0) return SDValue(); // All UNDEF: use implicit def. unsigned ValSizeInBytes = 0; uint64_t Value = 0; @@ -701,7 +701,7 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { // If the splat value is larger than the element value, then we can never do // this splat. The only case that we could fit the replicated bits into our // immediate field for would be zero, and we prefer to use vxor for it. - if (ValSizeInBytes < ByteSize) return SDOperand(); + if (ValSizeInBytes < ByteSize) return SDValue(); // If the element value is larger than the splat value, cut it in half and // check to see if the two halves are equal. Continue doing this until we @@ -712,7 +712,7 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { // If the top half equals the bottom half, we're still ok. if (((Value >> (ValSizeInBytes*8)) & ((1 << (8*ValSizeInBytes))-1)) != (Value & ((1 << (8*ValSizeInBytes))-1))) - return SDOperand(); + return SDValue(); } // Properly sign extend the value. @@ -720,12 +720,12 @@ SDOperand PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { int MaskVal = ((int)Value << ShAmt) >> ShAmt; // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. 
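get_VSPLTI_elt decides whether the splatted constant could also have been produced by a vspltis[bhw] with a narrower element: it repeatedly halves the value, requiring the two halves to match, then sign-extends the remaining piece and checks that it fits the 5-bit immediate (zero is rejected because vxor is preferred). A simplified standalone model of that decision:

```cpp
#include <cstdint>
#include <cassert>
#include <optional>

// Given the constant a build_vector splats and its element width in bytes,
// return the 5-bit signed immediate a vspltis of SplatBytes per element
// would need, or nothing if no such splat exists.
static std::optional<int> vspltisImm(uint64_t Value, unsigned EltBytes,
                                     unsigned SplatBytes) {
  if (EltBytes < SplatBytes) return std::nullopt;  // splat coarser than element
  // Cut the element in half until it is SplatBytes wide; every cut must
  // leave two identical halves, or the value is not a finer-grained splat.
  while (EltBytes > SplatBytes) {
    EltBytes >>= 1;
    uint64_t Mask = (EltBytes * 8 == 64) ? ~0ull : ((1ull << (EltBytes * 8)) - 1);
    if (((Value >> (EltBytes * 8)) & Mask) != (Value & Mask)) return std::nullopt;
    Value &= Mask;
  }
  // Sign-extend from SplatBytes*8 bits and check the 5-bit immediate range.
  int Bits = SplatBytes * 8;
  int64_t SExt = (int64_t)(Value << (64 - Bits)) >> (64 - Bits);
  if (SExt == 0 || SExt < -16 || SExt > 15) return std::nullopt;
  return (int)SExt;
}

int main() {
  assert(vspltisImm(0xFFFFFFFFFFFFFFFFull, 8, 1) == -1);   // splat of byte -1
  assert(vspltisImm(0x0505050505050505ull, 8, 1) == 5);
  assert(!vspltisImm(0x0102010201020102ull, 8, 1));        // byte halves differ
  assert(!vspltisImm(0x2020202020202020ull, 8, 1));        // 32 exceeds 5 bits
}
```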
- if (MaskVal == 0) return SDOperand(); + if (MaskVal == 0) return SDValue(); // Finally, if this value fits in a 5 bit sext field, return it if (((MaskVal << (32-5)) >> (32-5)) == MaskVal) return DAG.getTargetConstant(MaskVal, MVT::i32); - return SDOperand(); + return SDValue(); } //===----------------------------------------------------------------------===// @@ -746,7 +746,7 @@ static bool isIntS16Immediate(SDNode *N, short &Imm) { else return Imm == (int64_t)cast<ConstantSDNode>(N)->getValue(); } -static bool isIntS16Immediate(SDOperand Op, short &Imm) { +static bool isIntS16Immediate(SDValue Op, short &Imm) { return isIntS16Immediate(Op.Val, Imm); } @@ -754,8 +754,8 @@ static bool isIntS16Immediate(SDOperand Op, short &Imm) { /// SelectAddressRegReg - Given the specified addressed, check to see if it /// can be represented as an indexed [r+r] operation. Returns false if it /// can be more efficiently represented with [r+imm]. -bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base, - SDOperand &Index, +bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base, + SDValue &Index, SelectionDAG &DAG) { short imm = 0; if (N.getOpcode() == ISD::ADD) { @@ -802,8 +802,8 @@ bool PPCTargetLowering::SelectAddressRegReg(SDOperand N, SDOperand &Base, /// Returns true if the address N can be represented by a base register plus /// a signed 16-bit displacement [r+imm], and if it is not better /// represented as reg+reg. -bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp, - SDOperand &Base, SelectionDAG &DAG){ +bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp, + SDValue &Base, SelectionDAG &DAG){ // If this can be more profitably realized as r+r, fail. if (SelectAddressRegReg(N, Disp, Base, DAG)) return false; @@ -871,7 +871,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp, Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, MVT::i32); unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; - Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); + Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); return true; } } @@ -886,8 +886,8 @@ bool PPCTargetLowering::SelectAddressRegImm(SDOperand N, SDOperand &Disp, /// SelectAddressRegRegOnly - Given the specified addressed, force it to be /// represented as an indexed [r+r] operation. -bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base, - SDOperand &Index, +bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base, + SDValue &Index, SelectionDAG &DAG) { // Check to see if we can easily represent this as an [r+r] address. This // will fail if it thinks that the address is more profitably represented as @@ -913,8 +913,8 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDOperand N, SDOperand &Base, /// SelectAddressRegImmShift - Returns true if the address N can be /// represented by a base register plus a signed 14-bit displacement /// [r+imm*4]. Suitable for use by STD and friends. -bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp, - SDOperand &Base, +bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp, + SDValue &Base, SelectionDAG &DAG) { // If this can be more profitably realized as r+r, fail. 
if (SelectAddressRegReg(N, Disp, Base, DAG)) @@ -982,7 +982,7 @@ bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp, Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32); unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8; - Base = SDOperand(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); + Base = SDValue(DAG.getTargetNode(Opc, CN->getValueType(0), Base), 0); return true; } } @@ -1000,14 +1000,14 @@ bool PPCTargetLowering::SelectAddressRegImmShift(SDOperand N, SDOperand &Disp, /// getPreIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if the node's address /// can be legally represented as pre-indexed load / store address. -bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, - SDOperand &Offset, +bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) { // Disabled by default for now. if (!EnablePPCPreinc) return false; - SDOperand Ptr; + SDValue Ptr; MVT VT; if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { Ptr = LD->getBasePtr(); @@ -1054,18 +1054,18 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDOperand &Base, // LowerOperation implementation //===----------------------------------------------------------------------===// -SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op, +SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = Op.getValueType(); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); Constant *C = CP->getConstVal(); - SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); - SDOperand Zero = DAG.getConstant(0, PtrVT); + SDValue CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment()); + SDValue Zero = DAG.getConstant(0, PtrVT); const TargetMachine &TM = DAG.getTarget(); - SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero); - SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero); + SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, CPI, Zero); + SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, CPI, Zero); // If this is a non-darwin platform, we don't support non-static relo models // yet. @@ -1086,16 +1086,16 @@ SDOperand PPCTargetLowering::LowerConstantPool(SDOperand Op, return Lo; } -SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - SDOperand JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); - SDOperand Zero = DAG.getConstant(0, PtrVT); + SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); + SDValue Zero = DAG.getConstant(0, PtrVT); const TargetMachine &TM = DAG.getTarget(); - SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero); - SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero); + SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, JTI, Zero); + SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, JTI, Zero); // If this is a non-darwin platform, we don't support non-static relo models // yet. 
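Both the constant-address case above and the Hi/Lo pairs in LowerConstantPool and LowerJumpTable rely on the "high adjusted" split: because the 16-bit displacement is sign-extended, the high half must absorb a carry whenever bit 15 of the address is set, which is what (Addr - (signed short)Addr) >> 16 computes. A standalone check of that identity:

```cpp
#include <cstdint>
#include <cassert>

// Split a 32-bit address into the high-adjusted part loaded by lis/addis and
// the signed 16-bit low part used as a displacement. Subtracting the
// sign-extended low half before taking the high half absorbs the carry that
// sign extension of the displacement would otherwise introduce.
static void splitHa16Lo16(uint32_t Addr, uint32_t &Hi, int32_t &Lo) {
  Lo = (int16_t)(Addr & 0xFFFF);      // sign-extended low 16 bits
  Hi = (Addr - (uint32_t)Lo) >> 16;   // (Addr - (signed short)Addr) >> 16
}

int main() {
  for (uint32_t Addr : {0x12340000u, 0x12347FFFu, 0x12348000u, 0xFFFF8000u}) {
    uint32_t Hi; int32_t Lo;
    splitHa16Lo16(Addr, Hi, Lo);
    assert(Hi * 0x10000u + (uint32_t)Lo == Addr);  // lis Hi ; op ..., Lo(reg)
  }
}
```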
@@ -1116,27 +1116,27 @@ SDOperand PPCTargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { return Lo; } -SDOperand PPCTargetLowering::LowerGlobalTLSAddress(SDOperand Op, +SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) { assert(0 && "TLS not implemented for PPC."); - return SDOperand(); // Not reached + return SDValue(); // Not reached } -SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op, +SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) { MVT PtrVT = Op.getValueType(); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalValue *GV = GSDN->getGlobal(); - SDOperand GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); + SDValue GA = DAG.getTargetGlobalAddress(GV, PtrVT, GSDN->getOffset()); // If it's a debug information descriptor, don't mess with it. if (DAG.isVerifiedDebugInfoDesc(Op)) return GA; - SDOperand Zero = DAG.getConstant(0, PtrVT); + SDValue Zero = DAG.getConstant(0, PtrVT); const TargetMachine &TM = DAG.getTarget(); - SDOperand Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero); - SDOperand Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero); + SDValue Hi = DAG.getNode(PPCISD::Hi, PtrVT, GA, Zero); + SDValue Lo = DAG.getNode(PPCISD::Lo, PtrVT, GA, Zero); // If this is a non-darwin platform, we don't support non-static relo models // yet. @@ -1163,7 +1163,7 @@ SDOperand PPCTargetLowering::LowerGlobalAddress(SDOperand Op, return DAG.getLoad(PtrVT, DAG.getEntryNode(), Lo, NULL, 0); } -SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) { ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); // If we're comparing for equality to zero, expose the fact that this is @@ -1172,14 +1172,14 @@ SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { if (C->isNullValue() && CC == ISD::SETEQ) { MVT VT = Op.getOperand(0).getValueType(); - SDOperand Zext = Op.getOperand(0); + SDValue Zext = Op.getOperand(0); if (VT.bitsLT(MVT::i32)) { VT = MVT::i32; Zext = DAG.getNode(ISD::ZERO_EXTEND, VT, Op.getOperand(0)); } unsigned Log2b = Log2_32(VT.getSizeInBits()); - SDOperand Clz = DAG.getNode(ISD::CTLZ, VT, Zext); - SDOperand Scc = DAG.getNode(ISD::SRL, VT, Clz, + SDValue Clz = DAG.getNode(ISD::CTLZ, VT, Zext); + SDValue Scc = DAG.getNode(ISD::SRL, VT, Clz, DAG.getConstant(Log2b, MVT::i32)); return DAG.getNode(ISD::TRUNCATE, MVT::i32, Scc); } @@ -1187,7 +1187,7 @@ SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { // optimized. FIXME: revisit this when we can custom lower all setcc // optimizations. 
if (C->isAllOnesValue() || C->isNullValue()) - return SDOperand(); + return SDValue(); } // If we have an integer seteq/setne, turn it into a compare against zero @@ -1198,14 +1198,14 @@ SDOperand PPCTargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { MVT LHSVT = Op.getOperand(0).getValueType(); if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { MVT VT = Op.getValueType(); - SDOperand Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0), + SDValue Sub = DAG.getNode(ISD::XOR, LHSVT, Op.getOperand(0), Op.getOperand(1)); return DAG.getSetCC(VT, Sub, DAG.getConstant(0, LHSVT), CC); } - return SDOperand(); + return SDValue(); } -SDOperand PPCTargetLowering::LowerVAARG(SDOperand Op, SelectionDAG &DAG, +SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex, int VarArgsStackOffset, unsigned VarArgsNumGPR, @@ -1213,10 +1213,10 @@ SDOperand PPCTargetLowering::LowerVAARG(SDOperand Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) { assert(0 && "VAARG in ELF32 ABI not implemented yet!"); - return SDOperand(); // Not reached + return SDValue(); // Not reached } -SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, +SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex, int VarArgsStackOffset, unsigned VarArgsNumGPR, @@ -1227,7 +1227,7 @@ SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); + SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0); } @@ -1257,41 +1257,41 @@ SDOperand PPCTargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG, // } va_list[1]; - SDOperand ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8); - SDOperand ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8); + SDValue ArgGPR = DAG.getConstant(VarArgsNumGPR, MVT::i8); + SDValue ArgFPR = DAG.getConstant(VarArgsNumFPR, MVT::i8); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - SDOperand StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT); - SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); + SDValue StackOffsetFI = DAG.getFrameIndex(VarArgsStackOffset, PtrVT); + SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); uint64_t FrameOffset = PtrVT.getSizeInBits()/8; - SDOperand ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); + SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, PtrVT); uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; - SDOperand ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); + SDValue ConstStackOffset = DAG.getConstant(StackOffset, PtrVT); uint64_t FPROffset = 1; - SDOperand ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); + SDValue ConstFPROffset = DAG.getConstant(FPROffset, PtrVT); const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); // Store first byte : number of int regs - SDOperand firstStore = DAG.getStore(Op.getOperand(0), ArgGPR, + SDValue firstStore = DAG.getStore(Op.getOperand(0), ArgGPR, Op.getOperand(1), SV, 0); uint64_t nextOffset = FPROffset; - SDOperand nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1), + SDValue nextPtr = DAG.getNode(ISD::ADD, PtrVT, Op.getOperand(1), ConstFPROffset); // Store second byte : number of 
float regs - SDOperand secondStore = + SDValue secondStore = DAG.getStore(firstStore, ArgFPR, nextPtr, SV, nextOffset); nextOffset += StackOffset; nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstStackOffset); // Store second word : arguments given on stack - SDOperand thirdStore = + SDValue thirdStore = DAG.getStore(secondStore, StackOffsetFI, nextPtr, SV, nextOffset); nextOffset += FrameOffset; nextPtr = DAG.getNode(ISD::ADD, PtrVT, nextPtr, ConstFrameOffset); @@ -1324,7 +1324,7 @@ static const unsigned *GetFPR(const PPCSubtarget &Subtarget) { /// CalculateStackSlotSize - Calculates the size reserved for this argument on /// the stack. -static unsigned CalculateStackSlotSize(SDOperand Arg, SDOperand Flag, +static unsigned CalculateStackSlotSize(SDValue Arg, SDValue Flag, bool isVarArg, unsigned PtrByteSize) { MVT ArgVT = Arg.getValueType(); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Flag)->getArgFlags(); @@ -1336,8 +1336,8 @@ static unsigned CalculateStackSlotSize(SDOperand Arg, SDOperand Flag, return ArgSize; } -SDOperand -PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, +SDValue +PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex, int &VarArgsStackOffset, @@ -1349,8 +1349,8 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); - SmallVector<SDOperand, 8> ArgValues; - SDOperand Root = Op.getOperand(0); + SmallVector<SDValue, 8> ArgValues; + SDValue Root = Op.getOperand(0); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); @@ -1449,10 +1449,10 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, // represented with two words (long long or double) must be copied to an // even GPR_idx value or to an even ArgOffset value. - SmallVector<SDOperand, 8> MemOps; + SmallVector<SDValue, 8> MemOps; unsigned nAltivecParamsAtEnd = 0; for (unsigned ArgNo = 0, e = Op.Val->getNumValues()-1; ArgNo != e; ++ArgNo) { - SDOperand ArgVal; + SDValue ArgVal; bool needsLoad = false; MVT ObjectVT = Op.getValue(ArgNo).getValueType(); unsigned ObjSize = ObjectVT.getSizeInBits()/8; @@ -1497,14 +1497,14 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, } // The value of the object is its address. int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset); - SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); ArgValues.push_back(FIN); if (ObjSize==1 || ObjSize==2) { if (GPR_idx != Num_GPR_Regs) { unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); RegInfo.addLiveIn(GPR[GPR_idx], VReg); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); - SDOperand Store = DAG.getTruncStore(Val.getValue(1), Val, FIN, + SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT); + SDValue Store = DAG.getTruncStore(Val.getValue(1), Val, FIN, NULL, 0, ObjSize==1 ? 
MVT::i8 : MVT::i16 ); MemOps.push_back(Store); ++GPR_idx; @@ -1522,9 +1522,9 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); RegInfo.addLiveIn(GPR[GPR_idx], VReg); int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset); - SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); - SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); + SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT); + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); ++GPR_idx; if (isMachoABI) ArgOffset += PtrByteSize; @@ -1658,7 +1658,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset + (ArgSize - ObjSize), isImmutable); - SDOperand FIN = DAG.getFrameIndex(FI, PtrVT); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); ArgVal = DAG.getLoad(ObjectVT, Root, FIN, NULL, 0); } @@ -1708,17 +1708,17 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, VarArgsFrameIndex = MFI->CreateFixedObject(PtrVT.getSizeInBits()/8, depth); - SDOperand FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); + SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT); // In ELF 32 ABI, the fixed integer arguments of a variadic function are // stored to the VarArgsFrameIndex on the stack. if (isELF32_ABI) { for (GPR_idx = 0; GPR_idx != VarArgsNumGPR; ++GPR_idx) { - SDOperand Val = DAG.getRegister(GPR[GPR_idx], PtrVT); - SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); + SDValue Val = DAG.getRegister(GPR[GPR_idx], PtrVT); + SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); + SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } } @@ -1734,11 +1734,11 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); RegInfo.addLiveIn(GPR[GPR_idx], VReg); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, PtrVT); - SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); + SDValue Val = DAG.getCopyFromReg(Root, VReg, PtrVT); + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by four for the next argument to store - SDOperand PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); + SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1746,11 +1746,11 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, // on the stack. 
if (isELF32_ABI) { for (FPR_idx = 0; FPR_idx != VarArgsNumFPR; ++FPR_idx) { - SDOperand Val = DAG.getRegister(FPR[FPR_idx], MVT::f64); - SDOperand Store = DAG.getStore(Root, Val, FIN, NULL, 0); + SDValue Val = DAG.getRegister(FPR[FPR_idx], MVT::f64); + SDValue Store = DAG.getStore(Root, Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by eight for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, + SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1760,11 +1760,11 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, VReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass); RegInfo.addLiveIn(FPR[FPR_idx], VReg); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::f64); - SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); + SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::f64); + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0); MemOps.push_back(Store); // Increment the address by eight for the next argument to store - SDOperand PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, + SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, PtrVT); FIN = DAG.getNode(ISD::ADD, PtrOff.getValueType(), FIN, PtrOff); } @@ -1789,7 +1789,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, bool isMachoABI, bool isVarArg, unsigned CC, - SDOperand Call, + SDValue Call, unsigned &nAltivecParamsAtEnd) { // Count how many bytes are to be pushed on the stack, including the linkage // area, and parameter passing area. We start with 24/48 bytes, which is @@ -1806,8 +1806,8 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, // 16-byte aligned. nAltivecParamsAtEnd = 0; for (unsigned i = 0; i != NumOps; ++i) { - SDOperand Arg = Call.getOperand(5+2*i); - SDOperand Flag = Call.getOperand(5+2*i+1); + SDValue Arg = Call.getOperand(5+2*i); + SDValue Flag = Call.getOperand(5+2*i+1); MVT ArgVT = Arg.getValueType(); // Varargs Altivec parameters are padded to a 16 byte boundary. if (ArgVT==MVT::v4f32 || ArgVT==MVT::v4i32 || @@ -1871,8 +1871,8 @@ static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool IsTailCall, /// calling conventions match, currently only fastcc supports tail calls, and /// the function CALL is immediatly followed by a RET. bool -PPCTargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, - SDOperand Ret, +PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Call, + SDValue Ret, SelectionDAG& DAG) const { // Variable argument functions are not supported. if (!PerformTailCallOpt || @@ -1890,7 +1890,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, if (Flags.isByVal()) return false; } - SDOperand Callee = Call.getOperand(4); + SDValue Callee = Call.getOperand(4); // Non PIC/GOT tail calls are supported. if (getTargetMachine().getRelocationModel() != Reloc::PIC_) return true; @@ -1908,7 +1908,7 @@ PPCTargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, /// isCallCompatibleAddress - Return the immediate to use if the specified /// 32-bit value is representable in the immediate field of a BxA instruction. 
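The predicate documented just above (and defined immediately below) asks whether a constant call target fits the absolute-branch immediate. A sketch of that range test, assuming the standard I-form LI field: 24 encoded bits with two implicit zero low bits, i.e. a word-aligned signed 26-bit target:

```cpp
#include <cstdint>
#include <cassert>

// True if Addr can be the target of an absolute branch (bla): word aligned
// and representable as a sign-extended 26-bit value. The encoded immediate
// would then be Addr >> 2.
static bool fitsBlaTarget(int64_t Addr) {
  return (Addr & 3) == 0 && Addr >= -(1LL << 25) && Addr < (1LL << 25);
}

int main() {
  assert(fitsBlaTarget(0x01FFFFFC));   // largest positive word-aligned target
  assert(!fitsBlaTarget(0x02000000));  // one step past the range
  assert(!fitsBlaTarget(0x00000002));  // not word aligned
  assert(fitsBlaTarget(-0x02000000));  // most negative target
}
```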
-static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) { +static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); if (!C) return 0; @@ -1924,8 +1924,8 @@ static SDNode *isBLACompatibleAddress(SDOperand Op, SelectionDAG &DAG) { namespace { struct TailCallArgumentInfo { - SDOperand Arg; - SDOperand FrameIdxOp; + SDValue Arg; + SDValue FrameIdxOp; int FrameIdx; TailCallArgumentInfo() : FrameIdx(0) {} @@ -1936,12 +1936,12 @@ struct TailCallArgumentInfo { /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot. static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, - SDOperand Chain, + SDValue Chain, const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs, - SmallVector<SDOperand, 8> &MemOpChains) { + SmallVector<SDValue, 8> &MemOpChains) { for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) { - SDOperand Arg = TailCallArgs[i].Arg; - SDOperand FIN = TailCallArgs[i].FrameIdxOp; + SDValue Arg = TailCallArgs[i].Arg; + SDValue FIN = TailCallArgs[i].FrameIdxOp; int FI = TailCallArgs[i].FrameIdx; // Store relative to framepointer. MemOpChains.push_back(DAG.getStore(Chain, Arg, FIN, @@ -1952,11 +1952,11 @@ StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to /// the appropriate stack slot for the tail call optimized function call. -static SDOperand EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, +static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, MachineFunction &MF, - SDOperand Chain, - SDOperand OldRetAddr, - SDOperand OldFP, + SDValue Chain, + SDValue OldRetAddr, + SDValue OldFP, int SPDiff, bool isPPC64, bool isMachoABI) { @@ -1972,10 +1972,10 @@ static SDOperand EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, int NewFPIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize, NewFPLoc); MVT VT = isPPC64 ? MVT::i64 : MVT::i32; - SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); + SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT); Chain = DAG.getStore(Chain, OldRetAddr, NewRetAddrFrIdx, PseudoSourceValue::getFixedStack(NewRetAddr), 0); - SDOperand NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); + SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT); Chain = DAG.getStore(Chain, OldFP, NewFramePtrIdx, PseudoSourceValue::getFixedStack(NewFPIdx), 0); } @@ -1986,13 +1986,13 @@ static SDOperand EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, /// the position of the argument. static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, - SDOperand Arg, int SPDiff, unsigned ArgOffset, + SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { int Offset = ArgOffset + SPDiff; uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8; int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset); MVT VT = isPPC64 ? MVT::i64 : MVT::i32; - SDOperand FIN = DAG.getFrameIndex(FI, VT); + SDValue FIN = DAG.getFrameIndex(FI, VT); TailCallArgumentInfo Info; Info.Arg = Arg; Info.FrameIdxOp = FIN; @@ -2003,20 +2003,20 @@ CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, /// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address /// stack slot. Returns the chain as result and the loaded frame pointers in /// LROpOut/FPOpout. Used when tail calling. 
-SDOperand PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, +SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, int SPDiff, - SDOperand Chain, - SDOperand &LROpOut, - SDOperand &FPOpOut) { + SDValue Chain, + SDValue &LROpOut, + SDValue &FPOpOut) { if (SPDiff) { // Load the LR and FP stack slot for later adjusting. MVT VT = PPCSubTarget.isPPC64() ? MVT::i64 : MVT::i32; LROpOut = getReturnAddrFrameIndex(DAG); LROpOut = DAG.getLoad(VT, Chain, LROpOut, NULL, 0); - Chain = SDOperand(LROpOut.Val, 1); + Chain = SDValue(LROpOut.Val, 1); FPOpOut = getFramePointerFrameIndex(DAG); FPOpOut = DAG.getLoad(VT, Chain, FPOpOut, NULL, 0); - Chain = SDOperand(FPOpOut.Val, 1); + Chain = SDValue(FPOpOut.Val, 1); } return Chain; } @@ -2027,11 +2027,11 @@ SDOperand PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, /// a byval function parameter. /// Sometimes what we are copying is the end of a larger object, the part that /// does not fit in registers. -static SDOperand -CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain, +static SDValue +CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, unsigned Size) { - SDOperand SizeNode = DAG.getConstant(Size, MVT::i32); + SDValue SizeNode = DAG.getConstant(Size, MVT::i32); return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(), false, NULL, 0, NULL, 0); } @@ -2039,15 +2039,15 @@ CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain, /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of /// tail calls. static void -LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDOperand Chain, - SDOperand Arg, SDOperand PtrOff, int SPDiff, +LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, + SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, - bool isVector, SmallVector<SDOperand, 8> &MemOpChains, + bool isVector, SmallVector<SDValue, 8> &MemOpChains, SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) { MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); if (!isTailCall) { if (isVector) { - SDOperand StackPtr; + SDValue StackPtr; if (isPPC64) StackPtr = DAG.getRegister(PPC::X1, MVT::i64); else @@ -2061,15 +2061,15 @@ LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDOperand Chain, TailCallArguments); } -SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, +SDValue PPCTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget, TargetMachine &TM) { - SDOperand Chain = Op.getOperand(0); + SDValue Chain = Op.getOperand(0); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); bool isTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 && CC == CallingConv::Fast && PerformTailCallOpt; - SDOperand Callee = Op.getOperand(4); + SDValue Callee = Op.getOperand(4); unsigned NumOps = (Op.getNumOperands() - 5) / 2; bool isMachoABI = Subtarget.isMachoABI(); @@ -2083,7 +2083,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // args_to_use will accumulate outgoing args for the PPCISD::CALL case in // SelectExpr to use to put the arguments in the appropriate registers. - std::vector<SDOperand> args_to_use; + std::vector<SDValue> args_to_use; // Mark this function as potentially containing a function that contains a // tail call. 
As a consequence the frame pointer will be used for dynamicalloc @@ -2110,17 +2110,17 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // These operations are automatically eliminated by the prolog/epilog pass Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes, PtrVT)); - SDOperand CallSeqStart = Chain; + SDValue CallSeqStart = Chain; // Load the return address and frame pointer so it can be move somewhere else // later. - SDOperand LROp, FPOp; + SDValue LROp, FPOp; Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp); // Set up a copy of the stack pointer for use loading and storing any // arguments that may not fit in the registers available for argument // passing. - SDOperand StackPtr; + SDValue StackPtr; if (isPPC64) StackPtr = DAG.getRegister(PPC::X1, MVT::i64); else @@ -2153,13 +2153,13 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, const unsigned *GPR = isPPC64 ? GPR_64 : GPR_32; - std::vector<std::pair<unsigned, SDOperand> > RegsToPass; + std::vector<std::pair<unsigned, SDValue> > RegsToPass; SmallVector<TailCallArgumentInfo, 8> TailCallArguments; - SmallVector<SDOperand, 8> MemOpChains; + SmallVector<SDValue, 8> MemOpChains; for (unsigned i = 0; i != NumOps; ++i) { bool inMem = false; - SDOperand Arg = Op.getOperand(5+2*i); + SDValue Arg = Op.getOperand(5+2*i); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); // See if next argument requires stack alignment in ELF @@ -2167,7 +2167,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // PtrOff will be used to store the current argument to the stack if a // register cannot be found for it. - SDOperand PtrOff; + SDValue PtrOff; // Stack align in ELF 32 if (isELF32_ABI && Align) @@ -2195,20 +2195,20 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Everything else is passed left-justified. MVT VT = (Size==1) ? MVT::i8 : MVT::i16; if (GPR_idx != NumGPRs) { - SDOperand Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg, + SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, PtrVT, Chain, Arg, NULL, 0, VT); MemOpChains.push_back(Load.getValue(1)); RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); if (isMachoABI) ArgOffset += PtrByteSize; } else { - SDOperand Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); - SDOperand AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const); - SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, + SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); + SDValue AddPtr = DAG.getNode(ISD::ADD, PtrVT, PtrOff, Const); + SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, CallSeqStart.Val->getOperand(0), Flags, DAG, Size); // This must go outside the CALLSEQ_START..END. - SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, + SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, CallSeqStart.Val->getOperand(1)); DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val); Chain = CallSeqStart = NewCallSeqStart; @@ -2219,20 +2219,20 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Copy entire object into memory. There are cases where gcc-generated // code assumes it is there, even if it could be put entirely into // registers. (This is not what the doc says.) 
- SDOperand MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, + SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, CallSeqStart.Val->getOperand(0), Flags, DAG, Size); // This must go outside the CALLSEQ_START..END. - SDOperand NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, + SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, CallSeqStart.Val->getOperand(1)); DAG.ReplaceAllUsesWith(CallSeqStart.Val, NewCallSeqStart.Val); Chain = CallSeqStart = NewCallSeqStart; // And copy the pieces of it that fit into registers. for (unsigned j=0; j<Size; j+=PtrByteSize) { - SDOperand Const = DAG.getConstant(j, PtrOff.getValueType()); - SDOperand AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const); + SDValue Const = DAG.getConstant(j, PtrOff.getValueType()); + SDValue AddArg = DAG.getNode(ISD::ADD, PtrVT, Arg, Const); if (GPR_idx != NumGPRs) { - SDOperand Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0); + SDValue Load = DAG.getLoad(PtrVT, Chain, AddArg, NULL, 0); MemOpChains.push_back(Load.getValue(1)); RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); if (isMachoABI) @@ -2273,20 +2273,20 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg)); if (isVarArg) { - SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); + SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); MemOpChains.push_back(Store); // Float varargs are always shadowed in available integer registers if (GPR_idx != NumGPRs) { - SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); + SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); MemOpChains.push_back(Load.getValue(1)); if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); } if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){ - SDOperand ConstFour = DAG.getConstant(4, PtrOff.getValueType()); + SDValue ConstFour = DAG.getConstant(4, PtrOff.getValueType()); PtrOff = DAG.getNode(ISD::ADD, PtrVT, PtrOff, ConstFour); - SDOperand Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); + SDValue Load = DAG.getLoad(PtrVT, Store, PtrOff, NULL, 0); MemOpChains.push_back(Load.getValue(1)); if (isMachoABI) RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); @@ -2338,10 +2338,10 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // entirely in R registers. Maybe later. 
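The byval handling above first copies the whole object to its stack home with a memcpy hoisted outside the CALLSEQ_START..END, then reloads the leading pointer-sized pieces into whatever argument GPRs remain. A small sketch of that accounting; the helper name and the concrete numbers are made up for illustration:

#include <algorithm>
#include <cstdio>

// Hypothetical helper mirroring the j += PtrByteSize loop in the hunk above:
// the byval object always lives in memory, and only its leading pieces that
// still have parameter GPRs available are additionally reloaded into registers.
unsigned piecesInRegisters(unsigned Size, unsigned PtrByteSize,
                           unsigned FreeGPRs) {
  unsigned Pieces = (Size + PtrByteSize - 1) / PtrByteSize;
  return std::min(Pieces, FreeGPRs);
}

int main() {
  // A 20-byte struct with 4-byte pointers (32-bit PPC) and 3 argument GPRs
  // left: 5 pieces total, the first 3 travel in registers as well.
  std::printf("%u\n", piecesInRegisters(20, 4, 3));  // prints 3
  return 0;
}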
PtrOff = DAG.getNode(ISD::ADD, PtrVT, StackPtr, DAG.getConstant(ArgOffset, PtrVT)); - SDOperand Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); + SDValue Store = DAG.getStore(Chain, Arg, PtrOff, NULL, 0); MemOpChains.push_back(Store); if (VR_idx != NumVRs) { - SDOperand Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0); + SDValue Load = DAG.getLoad(MVT::v4f32, Store, PtrOff, NULL, 0); MemOpChains.push_back(Load.getValue(1)); RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load)); } @@ -2349,9 +2349,9 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, for (unsigned i=0; i<16; i+=PtrByteSize) { if (GPR_idx == NumGPRs) break; - SDOperand Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff, + SDValue Ix = DAG.getNode(ISD::ADD, PtrVT, PtrOff, DAG.getConstant(i, PtrVT)); - SDOperand Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0); + SDValue Load = DAG.getLoad(PtrVT, Store, Ix, NULL, 0); MemOpChains.push_back(Load.getValue(1)); RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); } @@ -2384,12 +2384,12 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, ArgOffset = ((ArgOffset+15)/16)*16; ArgOffset += 12*16; for (unsigned i = 0; i != NumOps; ++i) { - SDOperand Arg = Op.getOperand(5+2*i); + SDValue Arg = Op.getOperand(5+2*i); MVT ArgType = Arg.getValueType(); if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 || ArgType==MVT::v8i16 || ArgType==MVT::v16i8) { if (++j > NumVRs) { - SDOperand PtrOff; + SDValue PtrOff; // We are emitting Altivec params in order. LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, isPPC64, isTailCall, true, MemOpChains, @@ -2406,7 +2406,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Build a sequence of copy-to-reg nodes chained together with token chain // and flag operands which copy the outgoing args into the appropriate regs. - SDOperand InFlag; + SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second, InFlag); @@ -2415,7 +2415,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // With the ELF 32 ABI, set CR6 to true if this is a vararg call. if (isVarArg && isELF32_ABI) { - SDOperand SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0); + SDValue SetCR(DAG.getTargetNode(PPC::CRSET, MVT::i32), 0); Chain = DAG.getCopyToReg(Chain, PPC::CR1EQ, SetCR, InFlag); InFlag = Chain.getValue(1); } @@ -2423,9 +2423,9 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Emit a sequence of copyto/copyfrom virtual registers for arguments that // might overwrite each other in case of tail call optimization. if (isTailCall) { - SmallVector<SDOperand, 8> MemOpChains2; + SmallVector<SDValue, 8> MemOpChains2; // Do not flag preceeding copytoreg stuff together with the following stuff. - InFlag = SDOperand(); + InFlag = SDValue(); StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, MemOpChains2); if (!MemOpChains2.empty()) @@ -2439,7 +2439,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Emit callseq_end just before tailcall node. 
if (isTailCall) { - SmallVector<SDOperand, 8> CallSeqOps; + SmallVector<SDValue, 8> CallSeqOps; SDVTList CallSeqNodeTys = DAG.getVTList(MVT::Other, MVT::Flag); CallSeqOps.push_back(Chain); CallSeqOps.push_back(DAG.getIntPtrConstant(NumBytes)); @@ -2455,7 +2455,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; unsigned CallOpc = isMachoABI? PPCISD::CALL_Macho : PPCISD::CALL_ELF; // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every @@ -2467,11 +2467,11 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType()); else if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG)) // If this is an absolute destination address, use the munged value. - Callee = SDOperand(Dest, 0); + Callee = SDValue(Dest, 0); else { // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair // to do the call, we can't use PPCISD::CALL. - SDOperand MTCTROps[] = {Chain, Callee, InFlag}; + SDValue MTCTROps[] = {Chain, Callee, InFlag}; Chain = DAG.getNode(PPCISD::MTCTR, NodeTys, MTCTROps, 2+(InFlag.Val!=0)); InFlag = Chain.getValue(1); @@ -2523,7 +2523,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, "Flag must be set. Depend on flag being set in LowerRET"); Chain = DAG.getNode(PPCISD::TAILCALL, Op.Val->getVTList(), &Ops[0], Ops.size()); - return SDOperand(Chain.Val, Op.ResNo); + return SDValue(Chain.Val, Op.ResNo); } Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size()); @@ -2536,7 +2536,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, if (Op.Val->getValueType(0) != MVT::Other) InFlag = Chain.getValue(1); - SmallVector<SDOperand, 16> ResultVals; + SmallVector<SDValue, 16> ResultVals; SmallVector<CCValAssign, 16> RVLocs; unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv(); CCState CCInfo(CallerCC, isVarArg, TM, RVLocs); @@ -2558,12 +2558,12 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG, // Otherwise, merge everything together with a MERGE_VALUES node. 
ResultVals.push_back(Chain); - SDOperand Res = DAG.getMergeValues(Op.Val->getVTList(), &ResultVals[0], + SDValue Res = DAG.getMergeValues(Op.Val->getVTList(), &ResultVals[0], ResultVals.size()); return Res.getValue(Op.ResNo); } -SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG, +SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM) { SmallVector<CCValAssign, 16> RVLocs; unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); @@ -2578,13 +2578,13 @@ SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG, DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } - SDOperand Chain = Op.getOperand(0); + SDValue Chain = Op.getOperand(0); Chain = GetPossiblePreceedingTailCall(Chain, PPCISD::TAILCALL); if (Chain.getOpcode() == PPCISD::TAILCALL) { - SDOperand TailCall = Chain; - SDOperand TargetAddress = TailCall.getOperand(1); - SDOperand StackAdjustment = TailCall.getOperand(2); + SDValue TailCall = Chain; + SDValue TargetAddress = TailCall.getOperand(1); + SDValue StackAdjustment = TailCall.getOperand(2); assert(((TargetAddress.getOpcode() == ISD::Register && cast<RegisterSDNode>(TargetAddress)->getReg() == PPC::CTR) || @@ -2596,7 +2596,7 @@ SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG, assert(StackAdjustment.getOpcode() == ISD::Constant && "Expecting a const value"); - SmallVector<SDOperand,8> Operands; + SmallVector<SDValue,8> Operands; Operands.push_back(Chain.getOperand(0)); Operands.push_back(TargetAddress); Operands.push_back(StackAdjustment); @@ -2609,7 +2609,7 @@ SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG, Operands.size()); } - SDOperand Flag; + SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { @@ -2625,7 +2625,7 @@ SDOperand PPCTargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG, return DAG.getNode(PPCISD::RET_FLAG, MVT::Other, Chain); } -SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, +SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) { // When we pop the dynamic allocation we need to restore the SP link. @@ -2635,14 +2635,14 @@ SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, // Construct the stack pointer operand. bool IsPPC64 = Subtarget.isPPC64(); unsigned SP = IsPPC64 ? PPC::X1 : PPC::R1; - SDOperand StackPtr = DAG.getRegister(SP, PtrVT); + SDValue StackPtr = DAG.getRegister(SP, PtrVT); // Get the operands for the STACKRESTORE. - SDOperand Chain = Op.getOperand(0); - SDOperand SaveSP = Op.getOperand(1); + SDValue Chain = Op.getOperand(0); + SDValue SaveSP = Op.getOperand(1); // Load the old link SP. - SDOperand LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0); + SDValue LoadLinkSP = DAG.getLoad(PtrVT, Chain, StackPtr, NULL, 0); // Restore the stack pointer. 
Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), SP, SaveSP); @@ -2653,7 +2653,7 @@ SDOperand PPCTargetLowering::LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, -SDOperand +SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const { MachineFunction &MF = DAG.getMachineFunction(); bool IsPPC64 = PPCSubTarget.isPPC64(); @@ -2677,7 +2677,7 @@ PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG & DAG) const { return DAG.getFrameIndex(RASI, PtrVT); } -SDOperand +SDValue PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { MachineFunction &MF = DAG.getMachineFunction(); bool IsPPC64 = PPCSubTarget.isPPC64(); @@ -2702,34 +2702,34 @@ PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const { return DAG.getFrameIndex(FPSI, PtrVT); } -SDOperand PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, +SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget) { // Get the inputs. - SDOperand Chain = Op.getOperand(0); - SDOperand Size = Op.getOperand(1); + SDValue Chain = Op.getOperand(0); + SDValue Size = Op.getOperand(1); // Get the corect type for pointers. MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); // Negate the size. - SDOperand NegSize = DAG.getNode(ISD::SUB, PtrVT, + SDValue NegSize = DAG.getNode(ISD::SUB, PtrVT, DAG.getConstant(0, PtrVT), Size); // Construct a node for the frame pointer save index. - SDOperand FPSIdx = getFramePointerFrameIndex(DAG); + SDValue FPSIdx = getFramePointerFrameIndex(DAG); // Build a DYNALLOC node. - SDOperand Ops[3] = { Chain, NegSize, FPSIdx }; + SDValue Ops[3] = { Chain, NegSize, FPSIdx }; SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other); return DAG.getNode(PPCISD::DYNALLOC, VTs, Ops, 3); } -SDOperand PPCTargetLowering::LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerAtomicLOAD_ADD(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.Val->getValueType(0); - SDOperand Chain = Op.getOperand(0); - SDOperand Ptr = Op.getOperand(1); - SDOperand Incr = Op.getOperand(2); + SDValue Chain = Op.getOperand(0); + SDValue Ptr = Op.getOperand(1); + SDValue Incr = Op.getOperand(2); SDVTList VTs = DAG.getVTList(VT, MVT::Other); - SDOperand Ops[] = { + SDValue Ops[] = { Chain, Ptr, Incr, @@ -2737,15 +2737,15 @@ SDOperand PPCTargetLowering::LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG return DAG.getNode(PPCISD::ATOMIC_LOAD_ADD, VTs, Ops, 3); } -SDOperand PPCTargetLowering::LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerAtomicCMP_SWAP(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.Val->getValueType(0); - SDOperand Chain = Op.getOperand(0); - SDOperand Ptr = Op.getOperand(1); - SDOperand NewVal = Op.getOperand(2); - SDOperand OldVal = Op.getOperand(3); + SDValue Chain = Op.getOperand(0); + SDValue Ptr = Op.getOperand(1); + SDValue NewVal = Op.getOperand(2); + SDValue OldVal = Op.getOperand(3); SDVTList VTs = DAG.getVTList(VT, MVT::Other); - SDOperand Ops[] = { + SDValue Ops[] = { Chain, Ptr, OldVal, @@ -2754,14 +2754,14 @@ SDOperand PPCTargetLowering::LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG return DAG.getNode(PPCISD::ATOMIC_CMP_SWAP, VTs, Ops, 4); } -SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerAtomicSWAP(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.Val->getValueType(0); - SDOperand Chain = Op.getOperand(0); - SDOperand Ptr = Op.getOperand(1); - SDOperand NewVal = Op.getOperand(2); + SDValue Chain = 
Op.getOperand(0); + SDValue Ptr = Op.getOperand(1); + SDValue NewVal = Op.getOperand(2); SDVTList VTs = DAG.getVTList(VT, MVT::Other); - SDOperand Ops[] = { + SDValue Ops[] = { Chain, Ptr, NewVal, @@ -2771,21 +2771,21 @@ SDOperand PPCTargetLowering::LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG) { /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when /// possible. -SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) { // Not FP? Not a fsel. if (!Op.getOperand(0).getValueType().isFloatingPoint() || !Op.getOperand(2).getValueType().isFloatingPoint()) - return SDOperand(); + return SDValue(); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); // Cannot handle SETEQ/SETNE. - if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDOperand(); + if (CC == ISD::SETEQ || CC == ISD::SETNE) return SDValue(); MVT ResVT = Op.getValueType(); MVT CmpVT = Op.getOperand(0).getValueType(); - SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); - SDOperand TV = Op.getOperand(2), FV = Op.getOperand(3); + SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); + SDValue TV = Op.getOperand(2), FV = Op.getOperand(3); // If the RHS of the comparison is a 0.0, we don't need to do the // subtraction at all. @@ -2815,7 +2815,7 @@ SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { DAG.getNode(ISD::FNEG, MVT::f64, LHS), TV, FV); } - SDOperand Cmp; + SDValue Cmp; switch (CC) { default: break; // SETUO etc aren't handled by fsel. case ISD::SETULT: @@ -2847,17 +2847,17 @@ SDOperand PPCTargetLowering::LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { Cmp = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Cmp); return DAG.getNode(PPCISD::FSEL, ResVT, Cmp, TV, FV); } - return SDOperand(); + return SDValue(); } // FIXME: Split this code up when LegalizeDAGTypes lands. -SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { assert(Op.getOperand(0).getValueType().isFloatingPoint()); - SDOperand Src = Op.getOperand(0); + SDValue Src = Op.getOperand(0); if (Src.getValueType() == MVT::f32) Src = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Src); - SDOperand Tmp; + SDValue Tmp; switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Unhandled FP_TO_SINT type in custom expander!"); case MVT::i32: @@ -2869,10 +2869,10 @@ SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { } // Convert the FP value to an int value through memory. - SDOperand FIPtr = DAG.CreateStackTemporary(MVT::f64); + SDValue FIPtr = DAG.CreateStackTemporary(MVT::f64); // Emit a store to the stack slot. - SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0); + SDValue Chain = DAG.getStore(DAG.getEntryNode(), Tmp, FIPtr, NULL, 0); // Result is a load from the stack slot. If loading 4 bytes, make sure to // add in a bias. 
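LowerSELECT_CC above turns a floating-point select_cc into PPCISD::FSEL, whose scalar behaviour is "take the second operand when the first is >= 0.0, otherwise the third". A plain-C++ model of the rewrite, assuming the LHS-RHS subtraction form the surrounding comments describe; fsel sends NaN inputs to its third operand, which is part of why the hunk refuses SETEQ/SETNE and handles only the condition codes it lists:

#include <cassert>

// Scalar model of the PPC fsel instruction: fsel(A, B, C) = (A >= 0.0) ? B : C.
double fsel(double A, double B, double C) { return A >= 0.0 ? B : C; }

// "x < y ? T : F" can be rewritten as fsel(x - y, F, T): x - y >= 0 means the
// comparison was false, so the false value is placed in the >=0 slot.
double selectLT(double X, double Y, double T, double F) {
  return fsel(X - Y, F, T);
}

int main() {
  assert(selectLT(1.0, 2.0, 10.0, 20.0) == 10.0);  // 1 < 2, take T
  assert(selectLT(3.0, 2.0, 10.0, 20.0) == 20.0);  // 3 < 2 is false, take F
  return 0;
}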
@@ -2882,20 +2882,20 @@ SDOperand PPCTargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { return DAG.getLoad(Op.getValueType(), Chain, FIPtr, NULL, 0); } -SDOperand PPCTargetLowering::LowerFP_ROUND_INREG(SDOperand Op, +SDValue PPCTargetLowering::LowerFP_ROUND_INREG(SDValue Op, SelectionDAG &DAG) { assert(Op.getValueType() == MVT::ppcf128); SDNode *Node = Op.Val; assert(Node->getOperand(0).getValueType() == MVT::ppcf128); assert(Node->getOperand(0).Val->getOpcode() == ISD::BUILD_PAIR); - SDOperand Lo = Node->getOperand(0).Val->getOperand(0); - SDOperand Hi = Node->getOperand(0).Val->getOperand(1); + SDValue Lo = Node->getOperand(0).Val->getOperand(0); + SDValue Hi = Node->getOperand(0).Val->getOperand(1); // This sequence changes FPSCR to do round-to-zero, adds the two halves // of the long double, and puts FPSCR back the way it was. We do not // actually model FPSCR. std::vector<MVT> NodeTys; - SDOperand Ops[4], Result, MFFSreg, InFlag, FPreg; + SDValue Ops[4], Result, MFFSreg, InFlag, FPreg; NodeTys.push_back(MVT::f64); // Return register NodeTys.push_back(MVT::Flag); // Returns a flag for later insns @@ -2941,14 +2941,14 @@ SDOperand PPCTargetLowering::LowerFP_ROUND_INREG(SDOperand Op, return DAG.getNode(ISD::BUILD_PAIR, Lo.getValueType(), FPreg, FPreg); } -SDOperand PPCTargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) { // Don't handle ppc_fp128 here; let it be lowered to a libcall. if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) - return SDOperand(); + return SDValue(); if (Op.getOperand(0).getValueType() == MVT::i64) { - SDOperand Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); - SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); + SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, MVT::f64, Op.getOperand(0)); + SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Bits); if (Op.getValueType() == MVT::f32) FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0)); return FP; @@ -2963,28 +2963,28 @@ SDOperand PPCTargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(8, 8); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); + SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); - SDOperand Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, + SDValue Ext64 = DAG.getNode(PPCISD::EXTSW_32, MVT::i32, Op.getOperand(0)); // STD the extended value into the stack slot. MachineMemOperand MO(PseudoSourceValue::getFixedStack(FrameIdx), MachineMemOperand::MOStore, 0, 8, 8); - SDOperand Store = DAG.getNode(PPCISD::STD_32, MVT::Other, + SDValue Store = DAG.getNode(PPCISD::STD_32, MVT::Other, DAG.getEntryNode(), Ext64, FIdx, DAG.getMemOperand(MO)); // Load the value as a double. - SDOperand Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0); + SDValue Ld = DAG.getLoad(MVT::f64, Store, FIdx, NULL, 0); // FCFID it and return it. 
- SDOperand FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); + SDValue FP = DAG.getNode(PPCISD::FCFID, MVT::f64, Ld); if (Op.getValueType() == MVT::f32) FP = DAG.getNode(ISD::FP_ROUND, MVT::f32, FP, DAG.getIntPtrConstant(0)); return FP; } -SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) { /* The rounding mode is in bits 30:31 of FPSR, and has the following settings: @@ -3008,29 +3008,29 @@ SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); std::vector<MVT> NodeTys; - SDOperand MFFSreg, InFlag; + SDValue MFFSreg, InFlag; // Save FP Control Word to register NodeTys.push_back(MVT::f64); // return register NodeTys.push_back(MVT::Flag); // unused in this context - SDOperand Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0); + SDValue Chain = DAG.getNode(PPCISD::MFFS, NodeTys, &InFlag, 0); // Save FP register to stack slot int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); - SDOperand StackSlot = DAG.getFrameIndex(SSFI, PtrVT); - SDOperand Store = DAG.getStore(DAG.getEntryNode(), Chain, + SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); + SDValue Store = DAG.getStore(DAG.getEntryNode(), Chain, StackSlot, NULL, 0); // Load FP Control Word from low 32 bits of stack slot. - SDOperand Four = DAG.getConstant(4, PtrVT); - SDOperand Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four); - SDOperand CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0); + SDValue Four = DAG.getConstant(4, PtrVT); + SDValue Addr = DAG.getNode(ISD::ADD, PtrVT, StackSlot, Four); + SDValue CWD = DAG.getLoad(MVT::i32, Store, Addr, NULL, 0); // Transform as necessary - SDOperand CWD1 = + SDValue CWD1 = DAG.getNode(ISD::AND, MVT::i32, CWD, DAG.getConstant(3, MVT::i32)); - SDOperand CWD2 = + SDValue CWD2 = DAG.getNode(ISD::SRL, MVT::i32, DAG.getNode(ISD::AND, MVT::i32, DAG.getNode(ISD::XOR, MVT::i32, @@ -3038,14 +3038,14 @@ SDOperand PPCTargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { DAG.getConstant(3, MVT::i32)), DAG.getConstant(1, MVT::i8)); - SDOperand RetVal = + SDValue RetVal = DAG.getNode(ISD::XOR, MVT::i32, CWD1, CWD2); return DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); } -SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && @@ -3054,26 +3054,26 @@ SDOperand PPCTargetLowering::LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG) { // Expand into a bunch of logical ops. Note that these ops // depend on the PPC behavior for oversized shift amounts. 
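Back in the LowerFLT_ROUNDS_ hunk above, the AND/XOR/SRL nodes remap the FPSCR rounding-mode field onto the FLT_ROUNDS encoding. The hunk is clipped mid-expression, so the combined formula below is a reconstruction, and the RN encoding (0 = nearest, 1 = toward zero, 2 = +inf, 3 = -inf) is taken from the PPC architecture rather than from this diff:

#include <cassert>
#include <cstdint>

// Reconstructed bit-twiddle: FLT_ROUNDS = (RN & 3) ^ (((RN ^ 3) & 3) >> 1),
// mapping the FPSCR RN field onto 0=toward zero, 1=nearest, 2=+inf, 3=-inf.
static uint32_t fltRoundsFromRN(uint32_t RN) {
  return (RN & 3) ^ (((RN ^ 3) & 3) >> 1);
}

int main() {
  assert(fltRoundsFromRN(0) == 1);  // round to nearest
  assert(fltRoundsFromRN(1) == 0);  // round toward zero
  assert(fltRoundsFromRN(2) == 2);  // round toward +infinity
  assert(fltRoundsFromRN(3) == 3);  // round toward -infinity
  return 0;
}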
- SDOperand Lo = Op.getOperand(0); - SDOperand Hi = Op.getOperand(1); - SDOperand Amt = Op.getOperand(2); + SDValue Lo = Op.getOperand(0); + SDValue Hi = Op.getOperand(1); + SDValue Amt = Op.getOperand(2); MVT AmtVT = Amt.getValueType(); - SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, + SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); - SDOperand Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt); - SDOperand Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1); - SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); - SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, + SDValue Tmp2 = DAG.getNode(PPCISD::SHL, VT, Hi, Amt); + SDValue Tmp3 = DAG.getNode(PPCISD::SRL, VT, Lo, Tmp1); + SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); + SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, DAG.getConstant(-BitWidth, AmtVT)); - SDOperand Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5); - SDOperand OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); - SDOperand OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt); - SDOperand OutOps[] = { OutLo, OutHi }; + SDValue Tmp6 = DAG.getNode(PPCISD::SHL, VT, Lo, Tmp5); + SDValue OutHi = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); + SDValue OutLo = DAG.getNode(PPCISD::SHL, VT, Lo, Amt); + SDValue OutOps[] = { OutLo, OutHi }; return DAG.getMergeValues(OutOps, 2); } -SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && @@ -3082,26 +3082,26 @@ SDOperand PPCTargetLowering::LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG) { // Expand into a bunch of logical ops. Note that these ops // depend on the PPC behavior for oversized shift amounts. 
- SDOperand Lo = Op.getOperand(0); - SDOperand Hi = Op.getOperand(1); - SDOperand Amt = Op.getOperand(2); + SDValue Lo = Op.getOperand(0); + SDValue Hi = Op.getOperand(1); + SDValue Amt = Op.getOperand(2); MVT AmtVT = Amt.getValueType(); - SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, + SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); - SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); - SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); - SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); - SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, + SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); + SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); + SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); + SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, DAG.getConstant(-BitWidth, AmtVT)); - SDOperand Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5); - SDOperand OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); - SDOperand OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt); - SDOperand OutOps[] = { OutLo, OutHi }; + SDValue Tmp6 = DAG.getNode(PPCISD::SRL, VT, Hi, Tmp5); + SDValue OutLo = DAG.getNode(ISD::OR, VT, Tmp4, Tmp6); + SDValue OutHi = DAG.getNode(PPCISD::SRL, VT, Hi, Amt); + SDValue OutOps[] = { OutLo, OutHi }; return DAG.getMergeValues(OutOps, 2); } -SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); unsigned BitWidth = VT.getSizeInBits(); assert(Op.getNumOperands() == 3 && @@ -3109,23 +3109,23 @@ SDOperand PPCTargetLowering::LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG) { "Unexpected SRA!"); // Expand into a bunch of logical ops, followed by a select_cc. - SDOperand Lo = Op.getOperand(0); - SDOperand Hi = Op.getOperand(1); - SDOperand Amt = Op.getOperand(2); + SDValue Lo = Op.getOperand(0); + SDValue Hi = Op.getOperand(1); + SDValue Amt = Op.getOperand(2); MVT AmtVT = Amt.getValueType(); - SDOperand Tmp1 = DAG.getNode(ISD::SUB, AmtVT, + SDValue Tmp1 = DAG.getNode(ISD::SUB, AmtVT, DAG.getConstant(BitWidth, AmtVT), Amt); - SDOperand Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); - SDOperand Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); - SDOperand Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); - SDOperand Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, + SDValue Tmp2 = DAG.getNode(PPCISD::SRL, VT, Lo, Amt); + SDValue Tmp3 = DAG.getNode(PPCISD::SHL, VT, Hi, Tmp1); + SDValue Tmp4 = DAG.getNode(ISD::OR , VT, Tmp2, Tmp3); + SDValue Tmp5 = DAG.getNode(ISD::ADD, AmtVT, Amt, DAG.getConstant(-BitWidth, AmtVT)); - SDOperand Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5); - SDOperand OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt); - SDOperand OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT), + SDValue Tmp6 = DAG.getNode(PPCISD::SRA, VT, Hi, Tmp5); + SDValue OutHi = DAG.getNode(PPCISD::SRA, VT, Hi, Amt); + SDValue OutLo = DAG.getSelectCC(Tmp5, DAG.getConstant(0, AmtVT), Tmp4, Tmp6, ISD::SETLE); - SDOperand OutOps[] = { OutLo, OutHi }; + SDValue OutOps[] = { OutLo, OutHi }; return DAG.getMergeValues(OutOps, 2); } @@ -3145,7 +3145,7 @@ static bool GetConstantBuildVectorBits(SDNode *BV, uint64_t VectorBits[2], unsigned EltBitSize = BV->getOperand(0).getValueType().getSizeInBits(); for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { - SDOperand OpVal = BV->getOperand(i); + SDValue OpVal = BV->getOperand(i); unsigned PartNo = i >= e/2; // In the upper 128 bits? 
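The three *_PARTS expansions above lean on the "PPC behavior for oversized shift amounts" the comments mention: slw/srw take a 6-bit amount and produce zero once it reaches 32 (sraw fills with the sign bit instead), a summary taken from the architecture rather than from this diff. A standalone model of the SHL_PARTS case, checking the expansion against a real 64-bit shift for every amount:

#include <cassert>
#include <cstdint>

// Scalar models of the 32-bit PPC shifts: a 6-bit amount, zero result once
// the amount is 32 or more.
static uint32_t slw(uint32_t V, uint32_t Amt) { Amt &= 63; return Amt < 32 ? V << Amt : 0; }
static uint32_t srw(uint32_t V, uint32_t Amt) { Amt &= 63; return Amt < 32 ? V >> Amt : 0; }

// The SHL_PARTS expansion from the hunk, written out on plain integers.
static void shlParts(uint32_t Lo, uint32_t Hi, uint32_t Amt,
                     uint32_t &OutLo, uint32_t &OutHi) {
  const uint32_t BitWidth = 32;
  uint32_t Tmp1 = BitWidth - Amt;  // oversized whenever Amt == 0 or Amt > 32
  uint32_t Tmp2 = slw(Hi, Amt);
  uint32_t Tmp3 = srw(Lo, Tmp1);
  uint32_t Tmp4 = Tmp2 | Tmp3;
  uint32_t Tmp5 = Amt - BitWidth;  // oversized unless Amt >= 32
  uint32_t Tmp6 = slw(Lo, Tmp5);
  OutHi = Tmp4 | Tmp6;
  OutLo = slw(Lo, Amt);
}

int main() {
  const uint64_t Src = 0x0123456789ABCDEFull;
  for (uint32_t Amt = 0; Amt < 64; ++Amt) {
    uint32_t Lo, Hi;
    shlParts(uint32_t(Src), uint32_t(Src >> 32), Amt, Lo, Hi);
    uint64_t Got = (uint64_t(Hi) << 32) | Lo;
    assert(Got == (Src << Amt));  // matches a genuine 64-bit shift
  }
  return 0;
}

Exactly one of Tmp3/Tmp6 survives for any given amount, which is why the two OR trees need no select.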
unsigned SlotNo = e/2 - (i & (e/2-1))-1; // Which subpiece of the uint64_t. @@ -3229,7 +3229,7 @@ static bool isConstantSplat(const uint64_t Bits128[2], /// BuildSplatI - Build a canonical splati of Val with an element size of /// SplatSize. Cast the result to VT. -static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT VT, +static SDValue BuildSplatI(int Val, unsigned SplatSize, MVT VT, SelectionDAG &DAG) { assert(Val >= -16 && Val <= 15 && "vsplti is out of range!"); @@ -3246,17 +3246,17 @@ static SDOperand BuildSplatI(int Val, unsigned SplatSize, MVT VT, MVT CanonicalVT = VTys[SplatSize-1]; // Build a canonical splat for this value. - SDOperand Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType()); - SmallVector<SDOperand, 8> Ops; + SDValue Elt = DAG.getConstant(Val, CanonicalVT.getVectorElementType()); + SmallVector<SDValue, 8> Ops; Ops.assign(CanonicalVT.getVectorNumElements(), Elt); - SDOperand Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, + SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, CanonicalVT, &Ops[0], Ops.size()); return DAG.getNode(ISD::BIT_CONVERT, ReqVT, Res); } /// BuildIntrinsicOp - Return a binary operator intrinsic node with the /// specified intrinsic ID. -static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, +static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, SelectionDAG &DAG, MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = LHS.getValueType(); @@ -3266,8 +3266,8 @@ static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand LHS, SDOperand RHS, /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the /// specified intrinsic ID. -static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, - SDOperand Op2, SelectionDAG &DAG, +static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, + SDValue Op2, SelectionDAG &DAG, MVT DestVT = MVT::Other) { if (DestVT == MVT::Other) DestVT = Op0.getValueType(); return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DestVT, @@ -3277,16 +3277,16 @@ static SDOperand BuildIntrinsicOp(unsigned IID, SDOperand Op0, SDOperand Op1, /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified /// amount. The result has the specified value type. -static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, +static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, MVT VT, SelectionDAG &DAG) { // Force LHS/RHS to be the right type. LHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, LHS); RHS = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, RHS); - SDOperand Ops[16]; + SDValue Ops[16]; for (unsigned i = 0; i != 16; ++i) Ops[i] = DAG.getConstant(i+Amt, MVT::i8); - SDOperand T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, + SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v16i8, LHS, RHS, DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, Ops,16)); return DAG.getNode(ISD::BIT_CONVERT, VT, T); } @@ -3296,7 +3296,7 @@ static SDOperand BuildVSLDOI(SDOperand LHS, SDOperand RHS, unsigned Amt, // selects to a single instruction, return Op. Otherwise, if we can codegen // this case more efficiently than a constant pool load, lower it to the // sequence of ops that should be used. -SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, +SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { // If this is a vector of constants or undefs, get the bits. 
A bit in // UndefBits is set if the corresponding element of the vector is an @@ -3305,7 +3305,7 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, uint64_t VectorBits[2]; uint64_t UndefBits[2]; if (GetConstantBuildVectorBits(Op.Val, VectorBits, UndefBits)) - return SDOperand(); // Not a constant vector. + return SDValue(); // Not a constant vector. // If this is a splat (repetition) of a value across the whole vector, return // the smallest size that splats it. For example, "0x01010101010101..." is a @@ -3321,7 +3321,7 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, if (SplatBits == 0) { // Canonicalize all zero vectors to be v4i32. if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { - SDOperand Z = DAG.getConstant(0, MVT::i32); + SDValue Z = DAG.getConstant(0, MVT::i32); Z = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Z, Z, Z, Z); Op = DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Z); } @@ -3339,7 +3339,7 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // If this value is in the range [-32,30] and is even, use: // tmp = VSPLTI[bhw], result = add tmp, tmp if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { - SDOperand Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG); + SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG); Res = DAG.getNode(ISD::ADD, Res.getValueType(), Res, Res); return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Res); } @@ -3349,10 +3349,10 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // for fneg/fabs. if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) { // Make -1 and vspltisw -1: - SDOperand OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); + SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG); // Make the VSLW intrinsic, computing 0x8000_0000. - SDOperand Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, + SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV, OnesV, DAG); // xor by OnesV to invert it. @@ -3378,7 +3378,7 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // vsplti + shl self. if (SextVal == (i << (int)TypeShiftAmt)) { - SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); + SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); static const unsigned IIDs[] = { // Intrinsic to use for each size. Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, Intrinsic::ppc_altivec_vslw @@ -3389,7 +3389,7 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // vsplti + srl self. if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { - SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); + SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); static const unsigned IIDs[] = { // Intrinsic to use for each size. Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0, Intrinsic::ppc_altivec_vsrw @@ -3400,7 +3400,7 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // vsplti + sra self. if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) { - SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); + SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); static const unsigned IIDs[] = { // Intrinsic to use for each size. Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0, Intrinsic::ppc_altivec_vsraw @@ -3412,7 +3412,7 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // vsplti + rol self. 
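Two of the splat-immediate tricks above, worked through on plain integers (the per-lane behaviour of vspltisw/vslw is summarized from the AltiVec spec): doubling an even value that overflows vsplti's 5-bit immediate, and building the 0x80000000/0x7FFFFFFF lanes the fneg/fabs comment refers to without a constant-pool load:

#include <cassert>
#include <cstdint>

int main() {
  // Trick 1: an even splat value in [-32,30] is built as vsplti(val >> 1)
  // followed by an add of the splat to itself.
  int32_t SextVal = 24;             // even, outside [-16,15]
  int32_t Tmp = SextVal >> 1;       // vspltisw 12
  assert(Tmp >= -16 && Tmp <= 15);  // fits the 5-bit immediate
  assert(Tmp + Tmp == SextVal);     // the ISD::ADD of Res with itself

  // Trick 2: vspltisw -1 gives an all-ones lane; vslw shifts each lane left
  // by the low 5 bits of the amount lane (31 here), producing 0x80000000;
  // xor with the all-ones vector then inverts it to 0x7FFFFFFF.
  uint32_t OnesV = 0xFFFFFFFFu;
  uint32_t Res = OnesV << (OnesV & 31);
  assert(Res == 0x80000000u);
  assert((Res ^ OnesV) == 0x7FFFFFFFu);
  return 0;
}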
if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { - SDOperand Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); + SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG); static const unsigned IIDs[] = { // Intrinsic to use for each size. Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, Intrinsic::ppc_altivec_vrlw @@ -3423,17 +3423,17 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // t = vsplti c, result = vsldoi t, t, 1 if (SextVal == ((i << 8) | (i >> (TypeShiftAmt-8)))) { - SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); + SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG); } // t = vsplti c, result = vsldoi t, t, 2 if (SextVal == ((i << 16) | (i >> (TypeShiftAmt-16)))) { - SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); + SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG); } // t = vsplti c, result = vsldoi t, t, 3 if (SextVal == ((i << 24) | (i >> (TypeShiftAmt-24)))) { - SDOperand T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); + SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG); return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG); } } @@ -3442,27 +3442,27 @@ SDOperand PPCTargetLowering::LowerBUILD_VECTOR(SDOperand Op, // Odd, in range [17,31]: (vsplti C)-(vsplti -16). if (SextVal >= 0 && SextVal <= 31) { - SDOperand LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG); - SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); + SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG); + SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); LHS = DAG.getNode(ISD::SUB, LHS.getValueType(), LHS, RHS); return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); } // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). if (SextVal >= -31 && SextVal <= 0) { - SDOperand LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG); - SDOperand RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); + SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG); + SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG); LHS = DAG.getNode(ISD::ADD, LHS.getValueType(), LHS, RHS); return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), LHS); } } - return SDOperand(); + return SDValue(); } /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit /// the specified operations to build the shuffle. 
-static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, - SDOperand RHS, SelectionDAG &DAG) { +static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, + SDValue RHS, SelectionDAG &DAG) { unsigned OpNum = (PFEntry >> 26) & 0x0F; unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); @@ -3486,7 +3486,7 @@ static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, return RHS; } - SDOperand OpLHS, OpRHS; + SDValue OpLHS, OpRHS; OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG); OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG); @@ -3528,7 +3528,7 @@ static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, case OP_VSLDOI12: return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG); } - SDOperand Ops[16]; + SDValue Ops[16]; for (unsigned i = 0; i != 16; ++i) Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8); @@ -3540,11 +3540,11 @@ static SDOperand GeneratePerfectShuffle(unsigned PFEntry, SDOperand LHS, /// is a shuffle we can handle in a single instruction, return it. Otherwise, /// return the code it can be lowered into. Worst case, it can always be /// lowered into a vperm. -SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, +SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { - SDOperand V1 = Op.getOperand(0); - SDOperand V2 = Op.getOperand(1); - SDOperand PermMask = Op.getOperand(2); + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + SDValue PermMask = Op.getOperand(2); // Cases that are handled by instructions that take permute immediates // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be @@ -3642,7 +3642,7 @@ SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, MVT EltVT = V1.getValueType().getVectorElementType(); unsigned BytesPerElement = EltVT.getSizeInBits()/8; - SmallVector<SDOperand, 16> ResultMask; + SmallVector<SDValue, 16> ResultMask; for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) { unsigned SrcElt; if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF) @@ -3655,7 +3655,7 @@ SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, MVT::i8)); } - SDOperand VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, + SDValue VPermMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v16i8, &ResultMask[0], ResultMask.size()); return DAG.getNode(PPCISD::VPERM, V1.getValueType(), V1, V2, VPermMask); } @@ -3663,7 +3663,7 @@ SDOperand PPCTargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, /// getAltivecCompareInfo - Given an intrinsic, return false if it is not an /// altivec comparison. If it is, return true and fill in Opc/isDot with /// information about the intrinsic. -static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc, +static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot) { unsigned IntrinsicID = cast<ConstantSDNode>(Intrin.getOperand(0))->getValue(); CompareOpc = -1; @@ -3705,25 +3705,25 @@ static bool getAltivecCompareInfo(SDOperand Intrin, int &CompareOpc, /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom /// lower, do it, otherwise return null. -SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, +SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { // If this is a lowered altivec predicate compare, CompareOpc is set to the // opcode number of the comparison. 
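When none of the immediate-form shuffles apply, LowerVECTOR_SHUFFLE above falls back to a vperm whose mask bytes index into the 32-byte concatenation of the two inputs; only the low five bits of each mask byte matter. A scalar model (vperm behaviour summarized from the AltiVec spec), with the byte expansion of a <4, 0> word shuffle written out the way the mask-building loop suggests:

#include <cassert>
#include <cstdint>
#include <vector>

// Scalar model of vperm: each result byte selects one of the 32 bytes of
// V1 || V2, indexed by the low five bits of the corresponding mask byte.
static std::vector<uint8_t> vperm(const std::vector<uint8_t> &V1,
                                  const std::vector<uint8_t> &V2,
                                  const std::vector<uint8_t> &Mask) {
  std::vector<uint8_t> Out(16);
  for (unsigned i = 0; i != 16; ++i) {
    unsigned Idx = Mask[i] & 31;
    Out[i] = Idx < 16 ? V1[Idx] : V2[Idx - 16];
  }
  return Out;
}

int main() {
  std::vector<uint8_t> V1(16), V2(16);
  for (unsigned i = 0; i != 16; ++i) { V1[i] = i; V2[i] = 100 + i; }
  // v4i32 shuffle mask <4, 0, 4, 0> expanded to bytes: SrcElt scaled by the
  // 4-byte element size, plus the byte offset within the element.
  std::vector<uint8_t> Mask = {16, 17, 18, 19, 0, 1, 2, 3,
                               16, 17, 18, 19, 0, 1, 2, 3};
  std::vector<uint8_t> Out = vperm(V1, V2, Mask);
  assert(Out[0] == 100 && Out[4] == 0 && Out[8] == 100 && Out[12] == 0);
  return 0;
}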
int CompareOpc; bool isDot; if (!getAltivecCompareInfo(Op, CompareOpc, isDot)) - return SDOperand(); // Don't custom lower most intrinsics. + return SDValue(); // Don't custom lower most intrinsics. // If this is a non-dot comparison, make the VCMP node and we are done. if (!isDot) { - SDOperand Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), + SDValue Tmp = DAG.getNode(PPCISD::VCMP, Op.getOperand(2).getValueType(), Op.getOperand(1), Op.getOperand(2), DAG.getConstant(CompareOpc, MVT::i32)); return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Tmp); } // Create the PPCISD altivec 'dot' comparison node. - SDOperand Ops[] = { + SDValue Ops[] = { Op.getOperand(2), // LHS Op.getOperand(3), // RHS DAG.getConstant(CompareOpc, MVT::i32) @@ -3731,11 +3731,11 @@ SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, std::vector<MVT> VTs; VTs.push_back(Op.getOperand(2).getValueType()); VTs.push_back(MVT::Flag); - SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); + SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); // Now that we have the comparison, emit a copy from the CR to a GPR. // This is flagged to the above dot comparison. - SDOperand Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, + SDValue Flags = DAG.getNode(PPCISD::MFCR, MVT::i32, DAG.getRegister(PPC::CR6, MVT::i32), CompNode.getValue(1)); @@ -3772,29 +3772,29 @@ SDOperand PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, return Flags; } -SDOperand PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, +SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { // Create a stack slot that is 16-byte aligned. MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo(); int FrameIdx = FrameInfo->CreateStackObject(16, 16); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); - SDOperand FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); + SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); // Store the input value into Value#0 of the stack slot. - SDOperand Store = DAG.getStore(DAG.getEntryNode(), + SDValue Store = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), FIdx, NULL, 0); // Load it out. return DAG.getLoad(Op.getValueType(), Store, FIdx, NULL, 0); } -SDOperand PPCTargetLowering::LowerMUL(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) { if (Op.getValueType() == MVT::v4i32) { - SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); + SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); - SDOperand Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG); - SDOperand Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt. + SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG); + SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG); // +16 as shift amt. - SDOperand RHSSwap = // = vrlw RHS, 16 + SDValue RHSSwap = // = vrlw RHS, 16 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG); // Shrinkify inputs to v8i16. @@ -3804,36 +3804,36 @@ SDOperand PPCTargetLowering::LowerMUL(SDOperand Op, SelectionDAG &DAG) { // Low parts multiplied together, generating 32-bit results (we ignore the // top parts). - SDOperand LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, + SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, LHS, RHS, DAG, MVT::v4i32); - SDOperand HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, + SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, LHS, RHSSwap, Zero, DAG, MVT::v4i32); // Shift the high parts up 16 bits. 
HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, Neg16, DAG); return DAG.getNode(ISD::ADD, MVT::v4i32, LoProd, HiProd); } else if (Op.getValueType() == MVT::v8i16) { - SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); + SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); - SDOperand Zero = BuildSplatI(0, 1, MVT::v8i16, DAG); + SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG); return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, LHS, RHS, Zero, DAG); } else if (Op.getValueType() == MVT::v16i8) { - SDOperand LHS = Op.getOperand(0), RHS = Op.getOperand(1); + SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); // Multiply the even 8-bit parts, producing 16-bit sums. - SDOperand EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, + SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, LHS, RHS, DAG, MVT::v8i16); EvenParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, EvenParts); // Multiply the odd 8-bit parts, producing 16-bit sums. - SDOperand OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, + SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, LHS, RHS, DAG, MVT::v8i16); OddParts = DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, OddParts); // Merge the results together. - SDOperand Ops[16]; + SDValue Ops[16]; for (unsigned i = 0; i != 8; ++i) { Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8); Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8); @@ -3848,7 +3848,7 @@ SDOperand PPCTargetLowering::LowerMUL(SDOperand Op, SelectionDAG &DAG) { /// LowerOperation - Provide custom lowering hooks for some operations. /// -SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Wasn't expecting to be able to lower this!"); case ISD::ConstantPool: return LowerConstantPool(Op, DAG); @@ -3902,14 +3902,14 @@ SDOperand PPCTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); } - return SDOperand(); + return SDValue(); } SDNode *PPCTargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) { switch (N->getOpcode()) { default: assert(0 && "Wasn't expecting to be able to lower this!"); case ISD::FP_TO_SINT: { - SDOperand Res = LowerFP_TO_SINT(SDOperand(N, 0), DAG); + SDValue Res = LowerFP_TO_SINT(SDValue(N, 0), DAG); // Use MERGE_VALUES to drop the chain result value and get a node with one // result. This requires turning off getMergeValues simplification, since // otherwise it will give us Res back. @@ -4127,7 +4127,7 @@ PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // Target Optimization Hooks //===----------------------------------------------------------------------===// -SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, +SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { TargetMachine &TM = getTargetMachine(); SelectionDAG &DAG = DCI.DAG; @@ -4161,7 +4161,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, // type must be i64. 
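The v4i32 LowerMUL above builds a full 32-bit product per lane out of 16-bit multiplies: vmulouh supplies the low-half product, vmsumuhm against the vrlw-swapped RHS supplies the two cross products, and the vslw by 16 plus the final ISD::ADD folds them together; the high*high term would land entirely above bit 31, so dropping it is exact modulo 2^32. A scalar check of that identity (the intrinsic behaviour is summarized from the AltiVec spec, not from this diff):

#include <cassert>
#include <cstdint>

// Per-lane model of the decomposition used above.
static uint32_t mul32ViaHalves(uint32_t A, uint32_t B) {
  uint32_t ALo = A & 0xFFFF, AHi = A >> 16;
  uint32_t BLo = B & 0xFFFF, BHi = B >> 16;
  uint32_t LoProd = ALo * BLo;              // vmulouh
  uint32_t HiProd = AHi * BLo + ALo * BHi;  // vmsumuhm against vrlw(RHS, 16)
  return LoProd + (HiProd << 16);           // vslw by 16, then add
}

int main() {
  assert(mul32ViaHalves(0xDEADBEEFu, 0x12345678u) ==
         uint32_t(0xDEADBEEFu * 0x12345678u));
  assert(mul32ViaHalves(65537u, 65541u) == 65537u * 65541u);
  return 0;
}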
if (N->getOperand(0).getValueType() == MVT::i64 && N->getOperand(0).getOperand(0).getValueType() != MVT::ppcf128) { - SDOperand Val = N->getOperand(0).getOperand(0); + SDValue Val = N->getOperand(0).getOperand(0); if (Val.getValueType() == MVT::f32) { Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); DCI.AddToWorklist(Val.Val); @@ -4191,7 +4191,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, N->getOperand(1).getOpcode() == ISD::FP_TO_SINT && N->getOperand(1).getValueType() == MVT::i32 && N->getOperand(1).getOperand(0).getValueType() != MVT::ppcf128) { - SDOperand Val = N->getOperand(1).getOperand(0); + SDValue Val = N->getOperand(1).getOperand(0); if (Val.getValueType() == MVT::f32) { Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val); DCI.AddToWorklist(Val.Val); @@ -4210,7 +4210,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, N->getOperand(1).Val->hasOneUse() && (N->getOperand(1).getValueType() == MVT::i32 || N->getOperand(1).getValueType() == MVT::i16)) { - SDOperand BSwapOp = N->getOperand(1).getOperand(0); + SDValue BSwapOp = N->getOperand(1).getOperand(0); // Do an any-extend to 32-bits if this is a half-word input. if (BSwapOp.getValueType() == MVT::i16) BSwapOp = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, BSwapOp); @@ -4225,23 +4225,23 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, if (ISD::isNON_EXTLoad(N->getOperand(0).Val) && N->getOperand(0).hasOneUse() && (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16)) { - SDOperand Load = N->getOperand(0); + SDValue Load = N->getOperand(0); LoadSDNode *LD = cast<LoadSDNode>(Load); // Create the byte-swapping load. std::vector<MVT> VTs; VTs.push_back(MVT::i32); VTs.push_back(MVT::Other); - SDOperand MO = DAG.getMemOperand(LD->getMemOperand()); - SDOperand Ops[] = { + SDValue MO = DAG.getMemOperand(LD->getMemOperand()); + SDValue Ops[] = { LD->getChain(), // Chain LD->getBasePtr(), // Ptr MO, // MemOperand DAG.getValueType(N->getValueType(0)) // VT }; - SDOperand BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4); + SDValue BSLoad = DAG.getNode(PPCISD::LBRX, VTs, Ops, 4); // If this is an i16 load, insert the truncate. - SDOperand ResVal = BSLoad; + SDValue ResVal = BSLoad; if (N->getValueType(0) == MVT::i16) ResVal = DAG.getNode(ISD::TRUNCATE, MVT::i16, BSLoad); @@ -4254,7 +4254,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, DCI.CombineTo(Load.Val, ResVal, BSLoad.getValue(1)); // Return N so it doesn't get rechecked! - return SDOperand(N, 0); + return SDValue(N, 0); } break; @@ -4295,7 +4295,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, assert(UI != VCMPoNode->use_end() && "Didn't find user!"); SDNode *User = *UI; for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { - if (User->getOperand(i) == SDOperand(VCMPoNode, 1)) { + if (User->getOperand(i) == SDValue(VCMPoNode, 1)) { FlagUser = User; break; } @@ -4305,7 +4305,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, // If the user is a MFCR instruction, we know this is safe. Otherwise we // give up for right now. if (FlagUser->getOpcode() == PPCISD::MFCR) - return SDOperand(VCMPoNode, 0); + return SDValue(VCMPoNode, 0); } break; } @@ -4315,7 +4315,7 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, // lowering is done pre-legalize, because the legalizer lowers the predicate // compare down to code that is difficult to reassemble. 
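The BSWAP(load) combine above folds the swap into a PPCISD::LBRX node, i.e. a byte-reversed load (the lhbrx/lwbrx family). The identity it relies on is checked below with an explicit big-endian load model so the assertion holds on any host; helper names are ours:

#include <cassert>
#include <cstdint>

// Models lwz on big-endian PPC.
static uint32_t loadBE(const uint8_t *P) {
  return uint32_t(P[0]) << 24 | uint32_t(P[1]) << 16 | uint32_t(P[2]) << 8 |
         uint32_t(P[3]);
}
// Models lwbrx: same address, bytes assembled in the opposite order.
static uint32_t loadByteReversed(const uint8_t *P) {
  return uint32_t(P[0]) | uint32_t(P[1]) << 8 | uint32_t(P[2]) << 16 |
         uint32_t(P[3]) << 24;
}
static uint32_t bswap32(uint32_t V) {
  return (V >> 24) | ((V >> 8) & 0xFF00u) | ((V << 8) & 0xFF0000u) | (V << 24);
}

int main() {
  const uint8_t Buf[4] = {0x12, 0x34, 0x56, 0x78};
  // A byte-reversed load is exactly bswap of the normal load, so the
  // separate swap node can be eliminated.
  assert(loadByteReversed(Buf) == bswap32(loadBE(Buf)));
  assert(loadBE(Buf) == 0x12345678u && loadByteReversed(Buf) == 0x78563412u);
  return 0;
}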
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); - SDOperand LHS = N->getOperand(2), RHS = N->getOperand(3); + SDValue LHS = N->getOperand(2), RHS = N->getOperand(3); int CompareOpc; bool isDot; @@ -4339,14 +4339,14 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, // Create the PPCISD altivec 'dot' comparison node. std::vector<MVT> VTs; - SDOperand Ops[] = { + SDValue Ops[] = { LHS.getOperand(2), // LHS of compare LHS.getOperand(3), // RHS of compare DAG.getConstant(CompareOpc, MVT::i32) }; VTs.push_back(LHS.getOperand(2).getValueType()); VTs.push_back(MVT::Flag); - SDOperand CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); + SDValue CompNode = DAG.getNode(PPCISD::VCMPo, VTs, Ops, 3); // Unpack the result based on how the target uses it. PPC::Predicate CompOpc; @@ -4375,14 +4375,14 @@ SDOperand PPCTargetLowering::PerformDAGCombine(SDNode *N, } } - return SDOperand(); + return SDValue(); } //===----------------------------------------------------------------------===// // Inline Assembly Support //===----------------------------------------------------------------------===// -void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, +void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -4469,10 +4469,10 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. -void PPCTargetLowering::LowerAsmOperandForConstraint(SDOperand Op, char Letter, - std::vector<SDOperand>&Ops, +void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Letter, + std::vector<SDValue>&Ops, SelectionDAG &DAG) const { - SDOperand Result(0,0); + SDValue Result(0,0); switch (Letter) { default: break; case 'I': @@ -4579,16 +4579,16 @@ bool PPCTargetLowering::isLegalAddressImmediate(llvm::GlobalValue* GV) const { return false; } -SDOperand PPCTargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) { // Depths > 0 not supported yet! if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) - return SDOperand(); + return SDValue(); MachineFunction &MF = DAG.getMachineFunction(); PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); // Just load the return address off the stack. - SDOperand RetAddrFI = getReturnAddrFrameIndex(DAG); + SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); // Make sure the function really does not optimize away the store of the RA // to the stack. @@ -4596,10 +4596,10 @@ SDOperand PPCTargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); } -SDOperand PPCTargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { +SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { // Depths > 0 not supported yet! 
if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) - return SDOperand(); + return SDValue(); MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index 7f6d445db6..75f4e13cbc 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -215,7 +215,7 @@ namespace llvm { /// formed by using a vspltis[bhw] instruction of the specified element /// size, return the constant being splatted. The ByteSize field indicates /// the number of bytes of each element [124] -> [bhw]. - SDOperand get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG); + SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG); } class PPCTargetLowering : public TargetLowering { @@ -236,49 +236,49 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ISD::SETCC ValueType - virtual MVT getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDValue &) const; /// getPreIndexedAddressParts - returns true by value, base pointer and /// offset pointer and addressing mode by reference if the node's address /// can be legally represented as pre-indexed load / store address. - virtual bool getPreIndexedAddressParts(SDNode *N, SDOperand &Base, - SDOperand &Offset, + virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, + SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG); /// SelectAddressRegReg - Given the specified addressed, check to see if it /// can be represented as an indexed [r+r] operation. Returns false if it /// can be more efficiently represented with [r+imm]. - bool SelectAddressRegReg(SDOperand N, SDOperand &Base, SDOperand &Index, + bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG); /// SelectAddressRegImm - Returns true if the address N can be represented /// by a base register plus a signed 16-bit displacement [r+imm], and if it /// is not better represented as reg+reg. - bool SelectAddressRegImm(SDOperand N, SDOperand &Disp, SDOperand &Base, + bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG); /// SelectAddressRegRegOnly - Given the specified addressed, force it to be /// represented as an indexed [r+r] operation. - bool SelectAddressRegRegOnly(SDOperand N, SDOperand &Base, SDOperand &Index, + bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG); /// SelectAddressRegImmShift - Returns true if the address N can be /// represented by a base register plus a signed 14-bit displacement /// [r+imm*4]. Suitable for use by STD and friends. - bool SelectAddressRegImmShift(SDOperand N, SDOperand &Disp, SDOperand &Base, + bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG); /// LowerOperation - Provide custom lowering hooks for some operations. 
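The SelectAddressRegImm / SelectAddressRegImmShift declarations above distinguish the two displacement forms: a plain [r+imm] with a signed 16-bit offset, and the [r+imm*4] form "suitable for STD and friends", whose signed 14-bit scaled immediate amounts to a 16-bit offset with the low two bits clear. A tiny check of those ranges; helper names are ours, and the D-form/DS-form terminology is from the ISA rather than this diff:

#include <cassert>
#include <cstdint>

// D-form: any signed 16-bit byte offset.
static bool fitsDForm(int64_t Off) { return Off >= -32768 && Off <= 32767; }
// DS-form (STD and friends): signed 14-bit immediate scaled by 4, i.e. a
// signed 16-bit offset whose low two bits are zero.
static bool fitsDSForm(int64_t Off) { return fitsDForm(Off) && (Off & 3) == 0; }

int main() {
  assert(fitsDForm(-32768) && fitsDForm(32767) && !fitsDForm(32768));
  assert(fitsDSForm(32764) && !fitsDSForm(32766) && !fitsDSForm(-32772));
  return 0;
}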
/// - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG); - virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; - virtual void computeMaskedBitsForTargetNode(const SDOperand Op, + virtual void computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -300,9 +300,9 @@ namespace llvm { /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. - virtual void LowerAsmOperandForConstraint(SDOperand Op, + virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter, - std::vector<SDOperand> &Ops, + std::vector<SDValue> &Ops, SelectionDAG &DAG) const; /// isLegalAddressingMode - Return true if the addressing mode represented @@ -321,63 +321,63 @@ namespace llvm { /// IsEligibleForTailCallOptimization - Check whether the call is eligible /// for tail call optimization. Target which want to do tail call /// optimization should implement this function. - virtual bool IsEligibleForTailCallOptimization(SDOperand Call, - SDOperand Ret, + virtual bool IsEligibleForTailCallOptimization(SDValue Call, + SDValue Ret, SelectionDAG &DAG) const; private: - SDOperand getFramePointerFrameIndex(SelectionDAG & DAG) const; - SDOperand getReturnAddrFrameIndex(SelectionDAG & DAG) const; + SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const; + SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const; - SDOperand EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, + SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG, int SPDiff, - SDOperand Chain, - SDOperand &LROpOut, - SDOperand &FPOpOut); - - SDOperand LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, + SDValue Chain, + SDValue &LROpOut, + SDValue &FPOpOut); + + SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG); + SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG); + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); + SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG); + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex, int VarArgsStackOffset, unsigned VarArgsNumGPR, unsigned VarArgsNumFPR, const PPCSubtarget &Subtarget); - SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG, int VarArgsFrameIndex, + SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG, int VarArgsFrameIndex, int VarArgsStackOffset, unsigned VarArgsNumGPR, unsigned VarArgsNumFPR, const PPCSubtarget &Subtarget); - SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG, + SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex, int &VarArgsStackOffset, unsigned &VarArgsNumGPR, unsigned &VarArgsNumFPR, const PPCSubtarget &Subtarget); - 
SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG, + SDValue LowerCALL(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget, TargetMachine &TM); - SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG, TargetMachine &TM); - SDOperand LowerSTACKRESTORE(SDOperand Op, SelectionDAG &DAG, + SDValue LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM); + SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget); - SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG, + SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const PPCSubtarget &Subtarget); - SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerAtomicLOAD_ADD(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerAtomicCMP_SWAP(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerAtomicSWAP(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFP_ROUND_INREG(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSHL_PARTS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSRL_PARTS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSRA_PARTS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerMUL(SDOperand Op, SelectionDAG &DAG); + SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG); + SDValue LowerAtomicLOAD_ADD(SDValue Op, SelectionDAG &DAG); + SDValue LowerAtomicCMP_SWAP(SDValue Op, SelectionDAG &DAG); + SDValue LowerAtomicSWAP(SDValue Op, SelectionDAG &DAG); + SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG); + SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG); + SDValue LowerFP_ROUND_INREG(SDValue Op, SelectionDAG &DAG); + SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG); + SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG); + SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG); + SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG); + SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG); + SDValue LowerMUL(SDValue Op, SelectionDAG &DAG); }; } diff --git a/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/lib/Target/Sparc/SparcISelDAGToDAG.cpp index d1d2b5bdda..7d2d11ec72 100644 --- a/lib/Target/Sparc/SparcISelDAGToDAG.cpp +++ b/lib/Target/Sparc/SparcISelDAGToDAG.cpp @@ -40,12 +40,12 @@ public: Subtarget(TM.getSubtarget<SparcSubtarget>()) { } - SDNode *Select(SDOperand Op); + SDNode *Select(SDValue Op); // Complex Pattern Selectors. - bool SelectADDRrr(SDOperand Op, SDOperand N, SDOperand &R1, SDOperand &R2); - bool SelectADDRri(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Offset); + bool SelectADDRrr(SDValue Op, SDValue N, SDValue &R1, SDValue &R2); + bool SelectADDRri(SDValue Op, SDValue N, SDValue &Base, + SDValue &Offset); /// InstructionSelect - This callback is invoked by /// SelectionDAGISel when it has created a SelectionDAG for us to codegen. 
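The Sparc selector above declares its complex pattern selectors (SelectADDRrr, SelectADDRri) and its Select entry point in terms of the renamed value type. The rename can be purely textual because both SDOperand and SDValue describe the same lightweight handle: a pointer to the defining node plus the index of the result being referenced (the .Val and .ResNo members used throughout this patch, e.g. SDValue(Mul, 1) in the MULHU/MULHS hunk above). The sketch below is a minimal standalone analogue of that shape, not LLVM's actual classes; FakeNode and FakeValue are invented names used only for illustration.

#include <cassert>
#include <vector>

// Editorial sketch only: a DAG value handle is a (node, result-index) pair,
// which is why renaming the type changes no behavior and no layout.
struct FakeNode {
  unsigned Opcode;                    // stand-in for an ISD opcode
  std::vector<unsigned> ResultTypes;  // one type id per produced result
};

struct FakeValue {                    // the shape shared by SDOperand/SDValue
  FakeNode *Val;                      // defining node
  unsigned ResNo;                     // which of the node's results
  unsigned getValueType() const { return Val->ResultTypes[ResNo]; }
};

int main() {
  // A node with two results, e.g. {i32, Flag} as in the UMULrr selection above.
  FakeNode Mul{1u, {32u, 999u}};
  FakeValue Product{&Mul, 0u};        // analogous to SDValue(Mul, 0)
  FakeValue Glue{&Mul, 1u};           // analogous to SDValue(Mul, 1)
  assert(Product.getValueType() == 32u);
  assert(Glue.getValueType() == 999u);
  return 0;
}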
@@ -70,8 +70,8 @@ void SparcDAGToDAGISel::InstructionSelect(SelectionDAG &DAG) { DAG.RemoveDeadNodes(); } -bool SparcDAGToDAGISel::SelectADDRri(SDOperand Op, SDOperand Addr, - SDOperand &Base, SDOperand &Offset) { +bool SparcDAGToDAGISel::SelectADDRri(SDValue Op, SDValue Addr, + SDValue &Base, SDValue &Offset) { if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32); Offset = CurDAG->getTargetConstant(0, MVT::i32); @@ -111,8 +111,8 @@ bool SparcDAGToDAGISel::SelectADDRri(SDOperand Op, SDOperand Addr, return true; } -bool SparcDAGToDAGISel::SelectADDRrr(SDOperand Op, SDOperand Addr, - SDOperand &R1, SDOperand &R2) { +bool SparcDAGToDAGISel::SelectADDRrr(SDValue Op, SDValue Addr, + SDValue &R1, SDValue &R2) { if (Addr.getOpcode() == ISD::FrameIndex) return false; if (Addr.getOpcode() == ISD::TargetExternalSymbol || Addr.getOpcode() == ISD::TargetGlobalAddress) @@ -135,7 +135,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDOperand Op, SDOperand Addr, return true; } -SDNode *SparcDAGToDAGISel::Select(SDOperand Op) { +SDNode *SparcDAGToDAGISel::Select(SDValue Op) { SDNode *N = Op.Val; if (N->isMachineOpcode()) return NULL; // Already selected. @@ -145,20 +145,20 @@ SDNode *SparcDAGToDAGISel::Select(SDOperand Op) { case ISD::SDIV: case ISD::UDIV: { // FIXME: should use a custom expander to expose the SRA to the dag. - SDOperand DivLHS = N->getOperand(0); - SDOperand DivRHS = N->getOperand(1); + SDValue DivLHS = N->getOperand(0); + SDValue DivRHS = N->getOperand(1); AddToISelQueue(DivLHS); AddToISelQueue(DivRHS); // Set the Y register to the high-part. - SDOperand TopPart; + SDValue TopPart; if (N->getOpcode() == ISD::SDIV) { - TopPart = SDOperand(CurDAG->getTargetNode(SP::SRAri, MVT::i32, DivLHS, + TopPart = SDValue(CurDAG->getTargetNode(SP::SRAri, MVT::i32, DivLHS, CurDAG->getTargetConstant(31, MVT::i32)), 0); } else { TopPart = CurDAG->getRegister(SP::G0, MVT::i32); } - TopPart = SDOperand(CurDAG->getTargetNode(SP::WRYrr, MVT::Flag, TopPart, + TopPart = SDValue(CurDAG->getTargetNode(SP::WRYrr, MVT::Flag, TopPart, CurDAG->getRegister(SP::G0, MVT::i32)), 0); // FIXME: Handle div by immediate. @@ -169,15 +169,15 @@ SDNode *SparcDAGToDAGISel::Select(SDOperand Op) { case ISD::MULHU: case ISD::MULHS: { // FIXME: Handle mul by immediate. - SDOperand MulLHS = N->getOperand(0); - SDOperand MulRHS = N->getOperand(1); + SDValue MulLHS = N->getOperand(0); + SDValue MulRHS = N->getOperand(1); AddToISelQueue(MulLHS); AddToISelQueue(MulRHS); unsigned Opcode = N->getOpcode() == ISD::MULHU ? SP::UMULrr : SP::SMULrr; SDNode *Mul = CurDAG->getTargetNode(Opcode, MVT::i32, MVT::Flag, MulLHS, MulRHS); // The high part is in the Y register. - return CurDAG->SelectNodeTo(N, SP::RDY, MVT::i32, SDOperand(Mul, 1)); + return CurDAG->SelectNodeTo(N, SP::RDY, MVT::i32, SDValue(Mul, 1)); return NULL; } } diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 342caf87f9..e0c433546a 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -30,7 +30,7 @@ using namespace llvm; #include "SparcGenCallingConv.inc" -static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) { // CCValAssign - represent the assignment of the return value to locations. 
SmallVector<CCValAssign, 16> RVLocs; unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv(); @@ -50,8 +50,8 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } - SDOperand Chain = Op.getOperand(0); - SDOperand Flag; + SDValue Chain = Op.getOperand(0); + SDValue Flag; // Copy the result values into the output registers. for (unsigned i = 0; i != RVLocs.size(); ++i) { @@ -76,7 +76,7 @@ static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) { /// in FP registers for fastcc functions. void SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, - SmallVectorImpl<SDOperand> &ArgValues) { + SmallVectorImpl<SDValue> &ArgValues) { MachineFunction &MF = DAG.getMachineFunction(); MachineRegisterInfo &RegInfo = MF.getRegInfo(); @@ -87,8 +87,8 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, const unsigned *CurArgReg = ArgRegs, *ArgRegEnd = ArgRegs+6; unsigned ArgOffset = 68; - SDOperand Root = DAG.getRoot(); - std::vector<SDOperand> OutChains; + SDValue Root = DAG.getRoot(); + std::vector<SDValue> OutChains; for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) { MVT ObjectVT = getValueType(I->getType()); @@ -105,7 +105,7 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, } else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(*CurArgReg++, VReg); - SDOperand Arg = DAG.getCopyFromReg(Root, VReg, MVT::i32); + SDValue Arg = DAG.getCopyFromReg(Root, VReg, MVT::i32); if (ObjectVT != MVT::i32) { unsigned AssertOp = ISD::AssertSext; Arg = DAG.getNode(AssertOp, MVT::i32, Arg, @@ -115,8 +115,8 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, ArgValues.push_back(Arg); } else { int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset); - SDOperand FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); - SDOperand Load; + SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); + SDValue Load; if (ObjectVT == MVT::i32) { Load = DAG.getLoad(MVT::i32, Root, FIPtr, NULL, 0); } else { @@ -143,14 +143,14 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, // FP value is passed in an integer register. 
unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(*CurArgReg++, VReg); - SDOperand Arg = DAG.getCopyFromReg(Root, VReg, MVT::i32); + SDValue Arg = DAG.getCopyFromReg(Root, VReg, MVT::i32); Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Arg); ArgValues.push_back(Arg); } else { int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset); - SDOperand FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); - SDOperand Load = DAG.getLoad(MVT::f32, Root, FIPtr, NULL, 0); + SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); + SDValue Load = DAG.getLoad(MVT::f32, Root, FIPtr, NULL, 0); ArgValues.push_back(Load); } ArgOffset += 4; @@ -163,30 +163,30 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, if (CurArgReg < ArgRegEnd) ++CurArgReg; ArgValues.push_back(DAG.getNode(ISD::UNDEF, ObjectVT)); } else { - SDOperand HiVal; + SDValue HiVal; if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi); HiVal = DAG.getCopyFromReg(Root, VRegHi, MVT::i32); } else { int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset); - SDOperand FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); + SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); HiVal = DAG.getLoad(MVT::i32, Root, FIPtr, NULL, 0); } - SDOperand LoVal; + SDValue LoVal; if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo); LoVal = DAG.getCopyFromReg(Root, VRegLo, MVT::i32); } else { int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4); - SDOperand FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); + SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); LoVal = DAG.getLoad(MVT::i32, Root, FIPtr, NULL, 0); } // Compose the two halves together into an i64 unit. - SDOperand WholeValue = + SDValue WholeValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, LoVal, HiVal); // If we want a double, do a bit convert. 
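In the SparcISelLowering hunks above, a 64-bit argument that arrives split across two 32-bit slots is reassembled with ISD::BUILD_PAIR (low half as the first operand, high half as the second) and, when the argument is really a double, followed by a bit convert to f64. The hedged sketch below shows the same two steps outside the DAG; pairToDouble is an invented helper name, and it assumes IEEE 754 doubles and the high/low ordering shown in its parameters.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Combine two 32-bit halves into one 64-bit unit (the BUILD_PAIR step), then
// reinterpret the bits as a double (the BIT_CONVERT step). memcpy is the
// portable C++ spelling of the bit reinterpretation.
static double pairToDouble(uint32_t hi, uint32_t lo) {
  uint64_t whole = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &whole, sizeof d);
  return d;
}

int main() {
  // 0x3FF0000000000000 is the IEEE 754 bit pattern of 1.0.
  std::printf("%f\n", pairToDouble(0x3FF00000u, 0x00000000u));  // prints 1.000000
  return 0;
}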
@@ -208,10 +208,10 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, for (; CurArgReg != ArgRegEnd; ++CurArgReg) { unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass); MF.getRegInfo().addLiveIn(*CurArgReg, VReg); - SDOperand Arg = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); + SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32); int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset); - SDOperand FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); + SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32); OutChains.push_back(DAG.getStore(DAG.getRoot(), Arg, FIPtr, NULL, 0)); ArgOffset += 4; @@ -223,10 +223,10 @@ SparcTargetLowering::LowerArguments(Function &F, SelectionDAG &DAG, &OutChains[0], OutChains.size())); } -static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) { unsigned CallingConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); - SDOperand Chain = Op.getOperand(0); - SDOperand Callee = Op.getOperand(4); + SDValue Chain = Op.getOperand(0); + SDValue Callee = Op.getOperand(4); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; #if 0 @@ -270,8 +270,8 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize)); - SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; - SmallVector<SDOperand, 8> MemOpChains; + SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; + SmallVector<SDValue, 8> MemOpChains; #if 0 // Walk the register/memloc assignments, inserting copies/loads. @@ -279,7 +279,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { CCValAssign &VA = ArgLocs[i]; // Arguments start after the 5 first operands of ISD::CALL - SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); + SDValue Arg = Op.getOperand(5+2*VA.getValNo()); // Promote the value if needed. switch (VA.getLocInfo()) { @@ -306,9 +306,9 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { assert(VA.isMemLoc()); // Create a store off the stack pointer for this argument. - SDOperand StackPtr = DAG.getRegister(SP::O6, MVT::i32); + SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); // FIXME: VERIFY THAT 68 IS RIGHT. - SDOperand PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+68); + SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+68); PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0)); } @@ -320,9 +320,9 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { unsigned ArgOffset = 68; for (unsigned i = 5, e = Op.getNumOperands(); i != e; i += 2) { - SDOperand Val = Op.getOperand(i); + SDValue Val = Op.getOperand(i); MVT ObjectVT = Val.getValueType(); - SDOperand ValToStore(0, 0); + SDValue ValToStore(0, 0); unsigned ObjSize; switch (ObjectVT.getSimpleVT()) { default: assert(0 && "Unhandled argument type!"); @@ -358,9 +358,9 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { } // Split the value into top and bottom part. Top part goes in a reg. 
- SDOperand Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Val, + SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Val, DAG.getConstant(1, MVT::i32)); - SDOperand Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Val, + SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Val, DAG.getConstant(0, MVT::i32)); RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi)); @@ -375,8 +375,8 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { } if (ValToStore.Val) { - SDOperand StackPtr = DAG.getRegister(SP::O6, MVT::i32); - SDOperand PtrOff = DAG.getConstant(ArgOffset, MVT::i32); + SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); + SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32); PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff); MemOpChains.push_back(DAG.getStore(Chain, ValToStore, PtrOff, NULL, 0)); } @@ -393,7 +393,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { // chain and flag operands which copy the outgoing args into registers. // The InFlag in necessary since all emited instructions must be // stuck together. - SDOperand InFlag; + SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { unsigned Reg = RegsToPass[i].first; // Remap I0->I7 -> O0->O7. @@ -415,7 +415,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { std::vector<MVT> NodeTys; NodeTys.push_back(MVT::Other); // Returns a chain NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use. - SDOperand Ops[] = { Chain, Callee, InFlag }; + SDValue Ops[] = { Chain, Callee, InFlag }; Chain = DAG.getNode(SPISD::CALL, NodeTys, Ops, InFlag.Val ? 3 : 2); InFlag = Chain.getValue(1); @@ -429,7 +429,7 @@ static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) { CCState RVInfo(CallingConv, isVarArg, DAG.getTarget(), RVLocs); RVInfo.AnalyzeCallResult(Op.Val, RetCC_Sparc32); - SmallVector<SDOperand, 8> ResultVals; + SmallVector<SDValue, 8> ResultVals; // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { @@ -650,7 +650,7 @@ const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const { /// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to /// be zero. Op is expected to be a target specific node. Used by DAG /// combiner. -void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, +void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -679,7 +679,7 @@ void SparcTargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so // set LHS/RHS and SPCC to the LHS/RHS of the setcc and SPCC to the condition. 
-static void LookThroughSetCC(SDOperand &LHS, SDOperand &RHS, +static void LookThroughSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode CC, unsigned &SPCC) { if (isa<ConstantSDNode>(RHS) && cast<ConstantSDNode>(RHS)->getValue() == 0 && CC == ISD::SETNE && @@ -691,50 +691,50 @@ static void LookThroughSetCC(SDOperand &LHS, SDOperand &RHS, isa<ConstantSDNode>(LHS.getOperand(1)) && cast<ConstantSDNode>(LHS.getOperand(0))->getValue() == 1 && cast<ConstantSDNode>(LHS.getOperand(1))->getValue() == 0) { - SDOperand CMPCC = LHS.getOperand(3); + SDValue CMPCC = LHS.getOperand(3); SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getValue(); LHS = CMPCC.getOperand(0); RHS = CMPCC.getOperand(1); } } -static SDOperand LowerGLOBALADDRESS(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) { GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); - SDOperand GA = DAG.getTargetGlobalAddress(GV, MVT::i32); - SDOperand Hi = DAG.getNode(SPISD::Hi, MVT::i32, GA); - SDOperand Lo = DAG.getNode(SPISD::Lo, MVT::i32, GA); + SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32); + SDValue Hi = DAG.getNode(SPISD::Hi, MVT::i32, GA); + SDValue Lo = DAG.getNode(SPISD::Lo, MVT::i32, GA); return DAG.getNode(ISD::ADD, MVT::i32, Lo, Hi); } -static SDOperand LowerCONSTANTPOOL(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerCONSTANTPOOL(SDValue Op, SelectionDAG &DAG) { ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); Constant *C = N->getConstVal(); - SDOperand CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment()); - SDOperand Hi = DAG.getNode(SPISD::Hi, MVT::i32, CP); - SDOperand Lo = DAG.getNode(SPISD::Lo, MVT::i32, CP); + SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment()); + SDValue Hi = DAG.getNode(SPISD::Hi, MVT::i32, CP); + SDValue Lo = DAG.getNode(SPISD::Lo, MVT::i32, CP); return DAG.getNode(ISD::ADD, MVT::i32, Lo, Hi); } -static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { // Convert the fp value to integer in an FP register. assert(Op.getValueType() == MVT::i32); Op = DAG.getNode(SPISD::FTOI, MVT::f32, Op.getOperand(0)); return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op); } -static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) { assert(Op.getOperand(0).getValueType() == MVT::i32); - SDOperand Tmp = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Op.getOperand(0)); + SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Op.getOperand(0)); // Convert the int value to FP in an FP register. return DAG.getNode(SPISD::ITOF, Op.getValueType(), Tmp); } -static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG) { - SDOperand Chain = Op.getOperand(0); +static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) { + SDValue Chain = Op.getOperand(0); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get(); - SDOperand LHS = Op.getOperand(2); - SDOperand RHS = Op.getOperand(3); - SDOperand Dest = Op.getOperand(4); + SDValue LHS = Op.getOperand(2); + SDValue RHS = Op.getOperand(3); + SDValue Dest = Op.getOperand(4); unsigned Opc, SPCC = ~0U; // If this is a br_cc of a "setcc", and if the setcc got lowered into @@ -742,12 +742,12 @@ static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG) { LookThroughSetCC(LHS, RHS, CC, SPCC); // Get the condition flag. 
- SDOperand CompareFlag; + SDValue CompareFlag; if (LHS.getValueType() == MVT::i32) { std::vector<MVT> VTs; VTs.push_back(MVT::i32); VTs.push_back(MVT::Flag); - SDOperand Ops[2] = { LHS, RHS }; + SDValue Ops[2] = { LHS, RHS }; CompareFlag = DAG.getNode(SPISD::CMPICC, VTs, Ops, 2).getValue(1); if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); Opc = SPISD::BRICC; @@ -760,24 +760,24 @@ static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG) { DAG.getConstant(SPCC, MVT::i32), CompareFlag); } -static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { - SDOperand LHS = Op.getOperand(0); - SDOperand RHS = Op.getOperand(1); +static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) { + SDValue LHS = Op.getOperand(0); + SDValue RHS = Op.getOperand(1); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get(); - SDOperand TrueVal = Op.getOperand(2); - SDOperand FalseVal = Op.getOperand(3); + SDValue TrueVal = Op.getOperand(2); + SDValue FalseVal = Op.getOperand(3); unsigned Opc, SPCC = ~0U; // If this is a select_cc of a "setcc", and if the setcc got lowered into // an CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values. LookThroughSetCC(LHS, RHS, CC, SPCC); - SDOperand CompareFlag; + SDValue CompareFlag; if (LHS.getValueType() == MVT::i32) { std::vector<MVT> VTs; VTs.push_back(LHS.getValueType()); // subcc returns a value VTs.push_back(MVT::Flag); - SDOperand Ops[2] = { LHS, RHS }; + SDValue Ops[2] = { LHS, RHS }; CompareFlag = DAG.getNode(SPISD::CMPICC, VTs, Ops, 2).getValue(1); Opc = SPISD::SELECT_ICC; if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC); @@ -790,11 +790,11 @@ static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) { DAG.getConstant(SPCC, MVT::i32), CompareFlag); } -static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, +static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG, SparcTargetLowering &TLI) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - SDOperand Offset = DAG.getNode(ISD::ADD, MVT::i32, + SDValue Offset = DAG.getNode(ISD::ADD, MVT::i32, DAG.getRegister(SP::I6, MVT::i32), DAG.getConstant(TLI.getVarArgsFrameOffset(), MVT::i32)); @@ -802,15 +802,15 @@ static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG, return DAG.getStore(Op.getOperand(0), Offset, Op.getOperand(1), SV, 0); } -static SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG) { +static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) { SDNode *Node = Op.Val; MVT VT = Node->getValueType(0); - SDOperand InChain = Node->getOperand(0); - SDOperand VAListPtr = Node->getOperand(1); + SDValue InChain = Node->getOperand(0); + SDValue VAListPtr = Node->getOperand(1); const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue(); - SDOperand VAList = DAG.getLoad(MVT::i32, InChain, VAListPtr, SV, 0); + SDValue VAList = DAG.getLoad(MVT::i32, InChain, VAListPtr, SV, 0); // Increment the pointer, VAList, to the next vaarg - SDOperand NextPtr = DAG.getNode(ISD::ADD, MVT::i32, VAList, + SDValue NextPtr = DAG.getNode(ISD::ADD, MVT::i32, VAList, DAG.getConstant(VT.getSizeInBits()/8, MVT::i32)); // Store the incremented VAList to the legalized pointer @@ -822,41 +822,41 @@ static SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG) { return DAG.getLoad(VT, InChain, VAList, NULL, 0); // Otherwise, load it as i64, then do a bitconvert. - SDOperand V = DAG.getLoad(MVT::i64, InChain, VAList, NULL, 0); + SDValue V = DAG.getLoad(MVT::i64, InChain, VAList, NULL, 0); // Bit-Convert the value to f64. 
- SDOperand Ops[2] = { + SDValue Ops[2] = { DAG.getNode(ISD::BIT_CONVERT, MVT::f64, V), V.getValue(1) }; return DAG.getMergeValues(Ops, 2); } -static SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG) { - SDOperand Chain = Op.getOperand(0); // Legalize the chain. - SDOperand Size = Op.getOperand(1); // Legalize the size. +static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) { + SDValue Chain = Op.getOperand(0); // Legalize the chain. + SDValue Size = Op.getOperand(1); // Legalize the size. unsigned SPReg = SP::O6; - SDOperand SP = DAG.getCopyFromReg(Chain, SPReg, MVT::i32); - SDOperand NewSP = DAG.getNode(ISD::SUB, MVT::i32, SP, Size); // Value + SDValue SP = DAG.getCopyFromReg(Chain, SPReg, MVT::i32); + SDValue NewSP = DAG.getNode(ISD::SUB, MVT::i32, SP, Size); // Value Chain = DAG.getCopyToReg(SP.getValue(1), SPReg, NewSP); // Output chain // The resultant pointer is actually 16 words from the bottom of the stack, // to provide a register spill area. - SDOperand NewVal = DAG.getNode(ISD::ADD, MVT::i32, NewSP, + SDValue NewVal = DAG.getNode(ISD::ADD, MVT::i32, NewSP, DAG.getConstant(96, MVT::i32)); - SDOperand Ops[2] = { NewVal, Chain }; + SDValue Ops[2] = { NewVal, Chain }; return DAG.getMergeValues(Ops, 2); } -SDOperand SparcTargetLowering:: -LowerOperation(SDOperand Op, SelectionDAG &DAG) { +SDValue SparcTargetLowering:: +LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Should not custom lower this!"); // Frame & Return address. Currently unimplemented - case ISD::RETURNADDR: return SDOperand(); - case ISD::FRAMEADDR: return SDOperand(); + case ISD::RETURNADDR: return SDValue(); + case ISD::FRAMEADDR: return SDValue(); case ISD::GlobalTLSAddress: assert(0 && "TLS not implemented for Sparc."); case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG); diff --git a/lib/Target/Sparc/SparcISelLowering.h b/lib/Target/Sparc/SparcISelLowering.h index 2257304b7d..ac3a7c0027 100644 --- a/lib/Target/Sparc/SparcISelLowering.h +++ b/lib/Target/Sparc/SparcISelLowering.h @@ -43,14 +43,14 @@ namespace llvm { int VarArgsFrameOffset; // Frame offset to start of varargs area. public: SparcTargetLowering(TargetMachine &TM); - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); int getVarArgsFrameOffset() const { return VarArgsFrameOffset; } /// computeMaskedBitsForTargetNode - Determine which of the bits specified /// in Mask are known to be either zero or one and return them in the /// KnownZero/KnownOne bitsets. 
- virtual void computeMaskedBitsForTargetNode(const SDOperand Op, + virtual void computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -58,7 +58,7 @@ namespace llvm { unsigned Depth = 0) const; virtual void LowerArguments(Function &F, SelectionDAG &DAG, - SmallVectorImpl<SDOperand> &ArgValues); + SmallVectorImpl<SDValue> &ArgValues); virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB); diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 03aa74c44f..e9fefcbf5f 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -50,7 +50,7 @@ STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor"); namespace { /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses - /// SDOperand's instead of register numbers for the leaves of the matched + /// SDValue's instead of register numbers for the leaves of the matched /// tree. struct X86ISelAddressMode { enum { @@ -59,13 +59,13 @@ namespace { } BaseType; struct { // This is really a union, discriminated by BaseType! - SDOperand Reg; + SDValue Reg; int FrameIndex; } Base; bool isRIPRel; // RIP as base? unsigned Scale; - SDOperand IndexReg; + SDValue IndexReg; unsigned Disp; GlobalValue *GV; Constant *CP; @@ -143,38 +143,38 @@ namespace { #include "X86GenDAGISel.inc" private: - SDNode *Select(SDOperand N); + SDNode *Select(SDValue N); - bool MatchAddress(SDOperand N, X86ISelAddressMode &AM, + bool MatchAddress(SDValue N, X86ISelAddressMode &AM, bool isRoot = true, unsigned Depth = 0); - bool MatchAddressBase(SDOperand N, X86ISelAddressMode &AM, + bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM, bool isRoot, unsigned Depth); - bool SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Scale, SDOperand &Index, SDOperand &Disp); - bool SelectLEAAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Scale, SDOperand &Index, SDOperand &Disp); - bool SelectScalarSSELoad(SDOperand Op, SDOperand Pred, - SDOperand N, SDOperand &Base, SDOperand &Scale, - SDOperand &Index, SDOperand &Disp, - SDOperand &InChain, SDOperand &OutChain); - bool TryFoldLoad(SDOperand P, SDOperand N, - SDOperand &Base, SDOperand &Scale, - SDOperand &Index, SDOperand &Disp); + bool SelectAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Scale, SDValue &Index, SDValue &Disp); + bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Scale, SDValue &Index, SDValue &Disp); + bool SelectScalarSSELoad(SDValue Op, SDValue Pred, + SDValue N, SDValue &Base, SDValue &Scale, + SDValue &Index, SDValue &Disp, + SDValue &InChain, SDValue &OutChain); + bool TryFoldLoad(SDValue P, SDValue N, + SDValue &Base, SDValue &Scale, + SDValue &Index, SDValue &Disp); void PreprocessForRMW(SelectionDAG &DAG); void PreprocessForFPConvert(SelectionDAG &DAG); /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for /// inline asm expressions. 
- virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op, + virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, - std::vector<SDOperand> &OutOps, + std::vector<SDValue> &OutOps, SelectionDAG &DAG); void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI); - inline void getAddressOperands(X86ISelAddressMode &AM, SDOperand &Base, - SDOperand &Scale, SDOperand &Index, - SDOperand &Disp) { + inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base, + SDValue &Scale, SDValue &Index, + SDValue &Disp) { Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ? CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) : AM.Base.Reg; @@ -196,19 +196,19 @@ namespace { /// getI8Imm - Return a target constant with the specified value, of type /// i8. - inline SDOperand getI8Imm(unsigned Imm) { + inline SDValue getI8Imm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, MVT::i8); } /// getI16Imm - Return a target constant with the specified value, of type /// i16. - inline SDOperand getI16Imm(unsigned Imm) { + inline SDValue getI16Imm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, MVT::i16); } /// getI32Imm - Return a target constant with the specified value, of type /// i32. - inline SDOperand getI32Imm(unsigned Imm) { + inline SDValue getI32Imm(unsigned Imm) { return CurDAG->getTargetConstant(Imm, MVT::i32); } @@ -218,7 +218,7 @@ namespace { /// getTruncate - return an SDNode that implements a subreg based truncate /// of the specified operand to the the specified value type. - SDNode *getTruncate(SDOperand N0, MVT VT); + SDNode *getTruncate(SDValue N0, MVT VT); #ifndef NDEBUG unsigned Indent; @@ -233,7 +233,7 @@ static SDNode *findFlagUse(SDNode *N) { for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) { SDNode *User = *I; for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) { - SDOperand Op = User->getOperand(i); + SDValue Op = User->getOperand(i); if (Op.Val == N && Op.ResNo == FlagResNo) return User; } @@ -352,9 +352,9 @@ bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const { /// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand /// and move load below the TokenFactor. Replace store's chain operand with /// load's chain result. -static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load, - SDOperand Store, SDOperand TF) { - std::vector<SDOperand> Ops; +static void MoveBelowTokenFactor(SelectionDAG &DAG, SDValue Load, + SDValue Store, SDValue TF) { + std::vector<SDValue> Ops; for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i) if (Load.Val == TF.Val->getOperand(i).Val) Ops.push_back(Load.Val->getOperand(0)); @@ -368,8 +368,8 @@ static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load, /// isRMWLoad - Return true if N is a load that's part of RMW sub-DAG. 
/// -static bool isRMWLoad(SDOperand N, SDOperand Chain, SDOperand Address, - SDOperand &Load) { +static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address, + SDValue &Load) { if (N.getOpcode() == ISD::BIT_CONVERT) N = N.getOperand(0); @@ -437,19 +437,19 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) { E = DAG.allnodes_end(); I != E; ++I) { if (!ISD::isNON_TRUNCStore(I)) continue; - SDOperand Chain = I->getOperand(0); + SDValue Chain = I->getOperand(0); if (Chain.Val->getOpcode() != ISD::TokenFactor) continue; - SDOperand N1 = I->getOperand(1); - SDOperand N2 = I->getOperand(2); + SDValue N1 = I->getOperand(1); + SDValue N2 = I->getOperand(2); if ((N1.getValueType().isFloatingPoint() && !N1.getValueType().isVector()) || !N1.hasOneUse()) continue; bool RModW = false; - SDOperand Load; + SDValue Load; unsigned Opcode = N1.Val->getOpcode(); switch (Opcode) { case ISD::ADD: @@ -460,8 +460,8 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) { case ISD::ADDC: case ISD::ADDE: case ISD::VECTOR_SHUFFLE: { - SDOperand N10 = N1.getOperand(0); - SDOperand N11 = N1.getOperand(1); + SDValue N10 = N1.getOperand(0); + SDValue N11 = N1.getOperand(1); RModW = isRMWLoad(N10, Chain, N2, Load); if (!RModW) RModW = isRMWLoad(N11, Chain, N2, Load); @@ -477,14 +477,14 @@ void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) { case ISD::SUBE: case X86ISD::SHLD: case X86ISD::SHRD: { - SDOperand N10 = N1.getOperand(0); + SDValue N10 = N1.getOperand(0); RModW = isRMWLoad(N10, Chain, N2, Load); break; } } if (RModW) { - MoveBelowTokenFactor(DAG, Load, SDOperand(I, 0), Chain); + MoveBelowTokenFactor(DAG, Load, SDValue(I, 0), Chain); ++NumLoadMoved; } } @@ -533,12 +533,12 @@ void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) { else MemVT = SrcIsSSE ? SrcVT : DstVT; - SDOperand MemTmp = DAG.CreateStackTemporary(MemVT); + SDValue MemTmp = DAG.CreateStackTemporary(MemVT); // FIXME: optimize the case where the src/dest is a load or store? - SDOperand Store = DAG.getTruncStore(DAG.getEntryNode(), N->getOperand(0), + SDValue Store = DAG.getTruncStore(DAG.getEntryNode(), N->getOperand(0), MemTmp, NULL, 0, MemVT); - SDOperand Result = DAG.getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp, + SDValue Result = DAG.getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp, NULL, 0, MemVT); // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the @@ -546,7 +546,7 @@ void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) { // anything below the conversion could be folded into other existing nodes. // To avoid invalidating 'I', back it up to the convert node. --I; - DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result); + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result); // Now that we did that, the node is dead. Increment the iterator to the // next node to process, then delete N. @@ -674,7 +674,7 @@ void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) { /// MatchAddress - Add the specified node to the specified addressing mode, /// returning true if it cannot be done. This just pattern matches for the /// addressing mode. -bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM, +bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM, bool isRoot, unsigned Depth) { // Limit recursion. if (Depth > 5) @@ -719,7 +719,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM, // been picked, we can't fit the result available in the register in the // addressing mode. 
Duplicate GlobalAddress or ConstantPool as displacement. if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) { - SDOperand N0 = N.getOperand(0); + SDValue N0 = N.getOperand(0); if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) { GlobalValue *GV = G->getGlobal(); AM.GV = GV; @@ -765,7 +765,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM, unsigned Val = CN->getValue(); if (Val == 1 || Val == 2 || Val == 3) { AM.Scale = 1 << Val; - SDOperand ShVal = N.Val->getOperand(0); + SDValue ShVal = N.Val->getOperand(0); // Okay, we know that we have a scale by now. However, if the scaled // value is an add of something and a constant, we can fold the @@ -804,8 +804,8 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM, if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) { AM.Scale = unsigned(CN->getValue())-1; - SDOperand MulVal = N.Val->getOperand(0); - SDOperand Reg; + SDValue MulVal = N.Val->getOperand(0); + SDValue Reg; // Okay, we know that we have a scale by now. However, if the scaled // value is an add of something and a constant, we can fold the @@ -869,7 +869,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM, // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this // allows us to fold the shift into this addressing mode. if (AlreadySelected) break; - SDOperand Shift = N.getOperand(0); + SDValue Shift = N.getOperand(0); if (Shift.getOpcode() != ISD::SHL) break; // Scale must not be used already. @@ -894,9 +894,9 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM, break; // Get the new AND mask, this folds to a constant. - SDOperand NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(), - SDOperand(C2, 0), SDOperand(C1, 0)); - SDOperand NewAND = CurDAG->getNode(ISD::AND, N.getValueType(), + SDValue NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(), + SDValue(C2, 0), SDValue(C1, 0)); + SDValue NewAND = CurDAG->getNode(ISD::AND, N.getValueType(), Shift.getOperand(0), NewANDMask); NewANDMask.Val->setNodeId(Shift.Val->getNodeId()); NewAND.Val->setNodeId(N.Val->getNodeId()); @@ -912,7 +912,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM, /// MatchAddressBase - Helper for MatchAddress. Add the specified node to the /// specified addressing mode without any further recursion. -bool X86DAGToDAGISel::MatchAddressBase(SDOperand N, X86ISelAddressMode &AM, +bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM, bool isRoot, unsigned Depth) { // Is the base register already occupied? if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) { @@ -936,9 +936,9 @@ bool X86DAGToDAGISel::MatchAddressBase(SDOperand N, X86ISelAddressMode &AM, /// SelectAddr - returns true if it is able pattern match an addressing mode. /// It returns the operands which make up the maximal addressing mode it can /// match by reference. -bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base, - SDOperand &Scale, SDOperand &Index, - SDOperand &Disp) { +bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base, + SDValue &Scale, SDValue &Index, + SDValue &Disp) { X86ISelAddressMode AM; if (MatchAddress(N, AM)) return false; @@ -958,7 +958,7 @@ bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base, /// isZeroNode - Returns true if Elt is a constant zero or a floating point /// constant +0.0. 
-static inline bool isZeroNode(SDOperand Elt) { +static inline bool isZeroNode(SDValue Elt) { return ((isa<ConstantSDNode>(Elt) && cast<ConstantSDNode>(Elt)->getValue() == 0) || (isa<ConstantFPSDNode>(Elt) && @@ -969,11 +969,11 @@ static inline bool isZeroNode(SDOperand Elt) { /// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to /// match a load whose top elements are either undef or zeros. The load flavor /// is derived from the type of N, which is either v4f32 or v2f64. -bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred, - SDOperand N, SDOperand &Base, - SDOperand &Scale, SDOperand &Index, - SDOperand &Disp, SDOperand &InChain, - SDOperand &OutChain) { +bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred, + SDValue N, SDValue &Base, + SDValue &Scale, SDValue &Index, + SDValue &Disp, SDValue &InChain, + SDValue &OutChain) { if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) { InChain = N.getOperand(0).getValue(1); if (ISD::isNON_EXTLoad(InChain.Val) && @@ -1001,7 +1001,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred, if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp)) return false; OutChain = LD->getChain(); - InChain = SDOperand(LD, 1); + InChain = SDValue(LD, 1); return true; } return false; @@ -1010,9 +1010,9 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred, /// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing /// mode it matches can be cost effectively emitted as an LEA instruction. -bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N, - SDOperand &Base, SDOperand &Scale, - SDOperand &Index, SDOperand &Disp) { +bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N, + SDValue &Base, SDValue &Scale, + SDValue &Index, SDValue &Disp) { X86ISelAddressMode AM; if (MatchAddress(N, AM)) return false; @@ -1061,9 +1061,9 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N, return false; } -bool X86DAGToDAGISel::TryFoldLoad(SDOperand P, SDOperand N, - SDOperand &Base, SDOperand &Scale, - SDOperand &Index, SDOperand &Disp) { +bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N, + SDValue &Base, SDValue &Scale, + SDValue &Index, SDValue &Disp) { if (ISD::isNON_EXTLoad(N.Val) && N.hasOneUse() && CanBeFoldedBy(N.Val, P.Val, P.Val)) @@ -1111,8 +1111,8 @@ static SDNode *FindCallStartFromCall(SDNode *Node) { return FindCallStartFromCall(Node->getOperand(0).Val); } -SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT VT) { - SDOperand SRIdx; +SDNode *X86DAGToDAGISel::getTruncate(SDValue N0, MVT VT) { + SDValue SRIdx; switch (VT.getSimpleVT()) { default: assert(0 && "Unknown truncate!"); case MVT::i8: @@ -1130,7 +1130,7 @@ SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT VT) { Opc = X86::MOV32to32_; break; } - N0 = SDOperand(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0); + N0 = SDValue(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0); return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx, N0.getValue(1)); } @@ -1146,7 +1146,7 @@ SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT VT) { } -SDNode *X86DAGToDAGISel::Select(SDOperand N) { +SDNode *X86DAGToDAGISel::Select(SDValue N) { SDNode *Node = N.Val; MVT NVT = Node->getValueType(0); unsigned Opc, MOpc; @@ -1183,13 +1183,13 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { if (TM.getCodeModel() != CodeModel::Small) break; MVT PtrVT = TLI.getPointerTy(); - SDOperand N0 = N.getOperand(0); - SDOperand N1 = N.getOperand(1); + SDValue N0 = 
N.getOperand(0); + SDValue N1 = N.getOperand(1); if (N.Val->getValueType(0) == PtrVT && N0.getOpcode() == X86ISD::Wrapper && N1.getOpcode() == ISD::Constant) { unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue(); - SDOperand C(0, 0); + SDValue C(0, 0); // TODO: handle ExternalSymbolSDNode. if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) { @@ -1204,7 +1204,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { if (C.Val) { if (Subtarget->is64Bit()) { - SDOperand Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1), + SDValue Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1), CurDAG->getRegister(0, PtrVT), C }; return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4); } else @@ -1218,8 +1218,8 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { case ISD::SMUL_LOHI: case ISD::UMUL_LOHI: { - SDOperand N0 = Node->getOperand(0); - SDOperand N1 = Node->getOperand(1); + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); bool isSigned = Opcode == ISD::SMUL_LOHI; if (!isSigned) @@ -1248,7 +1248,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break; } - SDOperand Tmp0, Tmp1, Tmp2, Tmp3; + SDValue Tmp0, Tmp1, Tmp2, Tmp3; bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3); // multiplty is commmutative if (!foldedLoad) { @@ -1258,8 +1258,8 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { } AddToISelQueue(N0); - SDOperand InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg, - N0, SDOperand()).getValue(1); + SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg, + N0, SDValue()).getValue(1); if (foldedLoad) { AddToISelQueue(N1.getOperand(0)); @@ -1267,21 +1267,21 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { AddToISelQueue(Tmp1); AddToISelQueue(Tmp2); AddToISelQueue(Tmp3); - SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag }; + SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag }; SDNode *CNode = CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6); - InFlag = SDOperand(CNode, 1); + InFlag = SDValue(CNode, 1); // Update the chain. - ReplaceUses(N1.getValue(1), SDOperand(CNode, 0)); + ReplaceUses(N1.getValue(1), SDValue(CNode, 0)); } else { AddToISelQueue(N1); InFlag = - SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0); + SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0); } // Copy the low half of the result, if it is needed. if (!N.getValue(0).use_empty()) { - SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), + SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), LoReg, NVT, InFlag); InFlag = Result.getValue(2); ReplaceUses(N.getValue(0), Result); @@ -1293,18 +1293,18 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { } // Copy the high half of the result, if it is needed. if (!N.getValue(1).use_empty()) { - SDOperand Result; + SDValue Result; if (HiReg == X86::AH && Subtarget->is64Bit()) { // Prevent use of AH in a REX instruction by referencing AX instead. // Shift it down 8 bits. Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), X86::AX, MVT::i16, InFlag); InFlag = Result.getValue(2); - Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result, + Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result, CurDAG->getTargetConstant(8, MVT::i8)), 0); // Then truncate it down to i8. 
- SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1 - Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, + SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1 + Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, MVT::i8, Result, SRIdx), 0); } else { Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), @@ -1328,8 +1328,8 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { case ISD::SDIVREM: case ISD::UDIVREM: { - SDOperand N0 = Node->getOperand(0); - SDOperand N1 = Node->getOperand(1); + SDValue N0 = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); bool isSigned = Opcode == ISD::SDIVREM; if (!isSigned) @@ -1375,46 +1375,46 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { break; } - SDOperand Tmp0, Tmp1, Tmp2, Tmp3; + SDValue Tmp0, Tmp1, Tmp2, Tmp3; bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3); - SDOperand InFlag; + SDValue InFlag; if (NVT == MVT::i8 && !isSigned) { // Special case for div8, just use a move with zero extension to AX to // clear the upper 8 bits (AH). - SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain; + SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain; if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) { - SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) }; + SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) }; AddToISelQueue(N0.getOperand(0)); AddToISelQueue(Tmp0); AddToISelQueue(Tmp1); AddToISelQueue(Tmp2); AddToISelQueue(Tmp3); Move = - SDOperand(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other, + SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other, Ops, 5), 0); Chain = Move.getValue(1); ReplaceUses(N0.getValue(1), Chain); } else { AddToISelQueue(N0); Move = - SDOperand(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0); + SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0); Chain = CurDAG->getEntryNode(); } - Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDOperand()); + Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDValue()); InFlag = Chain.getValue(1); } else { AddToISelQueue(N0); InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), - LoReg, N0, SDOperand()).getValue(1); + LoReg, N0, SDValue()).getValue(1); if (isSigned) { // Sign extend the low part into the high part. InFlag = - SDOperand(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0); + SDValue(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0); } else { // Zero out the high part, effectively zero extending the input. - SDOperand ClrNode = SDOperand(CurDAG->getTargetNode(ClrOpcode, NVT), 0); + SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, NVT), 0); InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg, ClrNode, InFlag).getValue(1); } @@ -1426,21 +1426,21 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { AddToISelQueue(Tmp1); AddToISelQueue(Tmp2); AddToISelQueue(Tmp3); - SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag }; + SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag }; SDNode *CNode = CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6); - InFlag = SDOperand(CNode, 1); + InFlag = SDValue(CNode, 1); // Update the chain. - ReplaceUses(N1.getValue(1), SDOperand(CNode, 0)); + ReplaceUses(N1.getValue(1), SDValue(CNode, 0)); } else { AddToISelQueue(N1); InFlag = - SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0); + SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0); } // Copy the division (low) result, if it is needed. 
if (!N.getValue(0).use_empty()) { - SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), + SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), LoReg, NVT, InFlag); InFlag = Result.getValue(2); ReplaceUses(N.getValue(0), Result); @@ -1452,18 +1452,18 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { } // Copy the remainder (high) result, if it is needed. if (!N.getValue(1).use_empty()) { - SDOperand Result; + SDValue Result; if (HiReg == X86::AH && Subtarget->is64Bit()) { // Prevent use of AH in a REX instruction by referencing AX instead. // Shift it down 8 bits. Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), X86::AX, MVT::i16, InFlag); InFlag = Result.getValue(2); - Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result, + Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result, CurDAG->getTargetConstant(8, MVT::i8)), 0); // Then truncate it down to i8. - SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1 - Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, + SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1 + Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, MVT::i8, Result, SRIdx), 0); } else { Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), @@ -1490,7 +1490,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { if (NVT == MVT::i8) break; - SDOperand N0 = Node->getOperand(0); + SDValue N0 = Node->getOperand(0); // Get the subregsiter index for the type to extend. MVT N0VT = N0.getValueType(); unsigned Idx = (N0VT == MVT::i32) ? X86::SUBREG_32BIT : @@ -1503,9 +1503,9 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { // If we have an index, generate an insert_subreg into undef. AddToISelQueue(N0); - SDOperand Undef = - SDOperand(CurDAG->getTargetNode(X86::IMPLICIT_DEF, NVT), 0); - SDOperand SRIdx = CurDAG->getTargetConstant(Idx, MVT::i32); + SDValue Undef = + SDValue(CurDAG->getTargetNode(X86::IMPLICIT_DEF, NVT), 0); + SDValue SRIdx = CurDAG->getTargetConstant(Idx, MVT::i32); SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG, NVT, Undef, N0, SRIdx); @@ -1519,11 +1519,11 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { } case ISD::SIGN_EXTEND_INREG: { - SDOperand N0 = Node->getOperand(0); + SDValue N0 = Node->getOperand(0); AddToISelQueue(N0); MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT(); - SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0); + SDValue TruncOp = SDValue(getTruncate(N0, SVT), 0); unsigned Opc = 0; switch (NVT.getSimpleVT()) { default: assert(0 && "Unknown sign_extend_inreg!"); @@ -1564,7 +1564,7 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { } case ISD::TRUNCATE: { - SDOperand Input = Node->getOperand(0); + SDValue Input = Node->getOperand(0); AddToISelQueue(Node->getOperand(0)); SDNode *ResNode = getTruncate(Input, NVT); @@ -1581,9 +1581,9 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { case ISD::DECLARE: { // Handle DECLARE nodes here because the second operand may have been // wrapped in X86ISD::Wrapper. 
- SDOperand Chain = Node->getOperand(0); - SDOperand N1 = Node->getOperand(1); - SDOperand N2 = Node->getOperand(2); + SDValue Chain = Node->getOperand(0); + SDValue N1 = Node->getOperand(1); + SDValue N2 = Node->getOperand(2); if (!isa<FrameIndexSDNode>(N1)) break; int FI = cast<FrameIndexSDNode>(N1)->getIndex(); @@ -1594,10 +1594,10 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { isa<GlobalAddressSDNode>(N2.getOperand(0))) { GlobalValue *GV = cast<GlobalAddressSDNode>(N2.getOperand(0))->getGlobal(); - SDOperand Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); - SDOperand Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy()); + SDValue Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy()); + SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy()); AddToISelQueue(Chain); - SDOperand Ops[] = { Tmp1, Tmp2, Chain }; + SDValue Ops[] = { Tmp1, Tmp2, Chain }; return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, MVT::Other, Ops, 3); } @@ -1621,9 +1621,9 @@ SDNode *X86DAGToDAGISel::Select(SDOperand N) { } bool X86DAGToDAGISel:: -SelectInlineAsmMemoryOperand(const SDOperand &Op, char ConstraintCode, - std::vector<SDOperand> &OutOps, SelectionDAG &DAG){ - SDOperand Op0, Op1, Op2, Op3; +SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, + std::vector<SDValue> &OutOps, SelectionDAG &DAG){ + SDValue Op0, Op1, Op2, Op3; switch (ConstraintCode) { case 'o': // offsetable ?? case 'v': // not offsetable ?? diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index ed0773dc00..6f8b1ec7cc 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -41,7 +41,7 @@ using namespace llvm; // Forward declarations. -static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG); +static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG); X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) : TargetLowering(TM) { @@ -755,7 +755,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) } -MVT X86TargetLowering::getSetCCResultType(const SDOperand &) const { +MVT X86TargetLowering::getSetCCResultType(const SDValue &) const { return MVT::i8; } @@ -818,7 +818,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align, /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC /// jumptable. -SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table, +SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const { if (usesGlobalOffsetTable()) return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy()); @@ -834,7 +834,7 @@ SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table, #include "X86GenCallingConv.inc" /// LowerRET - Lower an ISD::RET node. -SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) { assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args"); SmallVector<CCValAssign, 16> RVLocs; @@ -850,14 +850,14 @@ SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { if (RVLocs[i].isRegLoc()) DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); } - SDOperand Chain = Op.getOperand(0); + SDValue Chain = Op.getOperand(0); // Handle tail call return. 
Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL); if (Chain.getOpcode() == X86ISD::TAILCALL) { - SDOperand TailCall = Chain; - SDOperand TargetAddress = TailCall.getOperand(1); - SDOperand StackAdjustment = TailCall.getOperand(2); + SDValue TailCall = Chain; + SDValue TargetAddress = TailCall.getOperand(1); + SDValue StackAdjustment = TailCall.getOperand(2); assert(((TargetAddress.getOpcode() == ISD::Register && (cast<RegisterSDNode>(TargetAddress)->getReg() == X86::ECX || cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) || @@ -867,7 +867,7 @@ SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { assert(StackAdjustment.getOpcode() == ISD::Constant && "Expecting a const value"); - SmallVector<SDOperand,8> Operands; + SmallVector<SDValue,8> Operands; Operands.push_back(Chain.getOperand(0)); Operands.push_back(TargetAddress); Operands.push_back(StackAdjustment); @@ -881,9 +881,9 @@ SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { } // Regular return. - SDOperand Flag; + SDValue Flag; - SmallVector<SDOperand, 6> RetOps; + SmallVector<SDValue, 6> RetOps; RetOps.push_back(Chain); // Operand #0 = Chain (updated below) // Operand #1 = Bytes To Pop RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16)); @@ -892,7 +892,7 @@ SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); - SDOperand ValToCopy = Op.getOperand(i*2+1); + SDValue ValToCopy = Op.getOperand(i*2+1); // Returns in ST0/ST1 are handled specially: these are pushed as operands to // the RET instruction and handled by the FP Stackifier. @@ -924,7 +924,7 @@ SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); FuncInfo->setSRetReturnReg(Reg); } - SDOperand Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy()); + SDValue Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy()); Chain = DAG.getCopyToReg(Chain, X86::RAX, Val, Flag); Flag = Chain.getValue(1); @@ -946,7 +946,7 @@ SDOperand X86TargetLowering::LowerRET(SDOperand Op, SelectionDAG &DAG) { /// being lowered. The returns a SDNode with the same number of values as the /// ISD::CALL. SDNode *X86TargetLowering:: -LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, +LowerCallResult(SDValue Chain, SDValue InFlag, SDNode *TheCall, unsigned CallingConv, SelectionDAG &DAG) { // Assign locations to each value returned by this call. @@ -955,7 +955,7 @@ LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs); CCInfo.AnalyzeCallResult(TheCall, RetCC_X86); - SmallVector<SDOperand, 8> ResultVals; + SmallVector<SDValue, 8> ResultVals; // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { @@ -971,7 +971,7 @@ LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode *TheCall, Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(), CopyVT, InFlag).getValue(1); - SDOperand Val = Chain.getValue(0); + SDValue Val = Chain.getValue(0); InFlag = Chain.getValue(2); if (CopyVT != RVLocs[i].getValVT()) { @@ -1015,7 +1015,7 @@ static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg, /// CallIsStructReturn - Determines whether a CALL node uses struct return /// semantics. 
-static bool CallIsStructReturn(SDOperand Op) { +static bool CallIsStructReturn(SDValue Op) { unsigned NumOps = (Op.getNumOperands() - 5) / 2; if (!NumOps) return false; @@ -1025,7 +1025,7 @@ static bool CallIsStructReturn(SDOperand Op) { /// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses struct /// return semantics. -static bool ArgsAreStructReturn(SDOperand Op) { +static bool ArgsAreStructReturn(SDValue Op) { unsigned NumArgs = Op.Val->getNumValues() - 1; if (!NumArgs) return false; @@ -1036,7 +1036,7 @@ static bool ArgsAreStructReturn(SDOperand Op) { /// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires /// the callee to pop its own arguments. Callee pop is necessary to support tail /// calls. -bool X86TargetLowering::IsCalleePop(SDOperand Op) { +bool X86TargetLowering::IsCalleePop(SDValue Op) { bool IsVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; if (IsVarArg) return false; @@ -1055,7 +1055,7 @@ bool X86TargetLowering::IsCalleePop(SDOperand Op) { /// CCAssignFnForNode - Selects the correct CCAssignFn for a CALL or /// FORMAL_ARGUMENTS node. -CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const { +CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDValue Op) const { unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); if (Subtarget->is64Bit()) { @@ -1080,7 +1080,7 @@ CCAssignFn *X86TargetLowering::CCAssignFnForNode(SDOperand Op) const { /// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to /// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node. NameDecorationStyle -X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDOperand Op) { +X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDValue Op) { unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); if (CC == CallingConv::X86_FastCall) return FastCall; @@ -1111,19 +1111,19 @@ X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) { /// by "Src" to address "Dst" with size and alignment information specified by /// the specific parameter attribute. The copy will be passed as a byval /// function parameter. -static SDOperand -CreateCopyOfByValArgument(SDOperand Src, SDOperand Dst, SDOperand Chain, +static SDValue +CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG) { - SDOperand SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); + SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32); return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(), /*AlwaysInline=*/true, NULL, 0, NULL, 0); } -SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG, +SDValue X86TargetLowering::LowerMemArgument(SDValue Op, SelectionDAG &DAG, const CCValAssign &VA, MachineFrameInfo *MFI, unsigned CC, - SDOperand Root, unsigned i) { + SDValue Root, unsigned i) { // Create the nodes corresponding to a load from this parameter slot. ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags(); @@ -1136,15 +1136,15 @@ SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG, // could be overwritten by lowering of arguments in case of a tail call. 
int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8, VA.getLocMemOffset(), isImmutable); - SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy()); + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); if (Flags.isByVal()) return FIN; return DAG.getLoad(VA.getValVT(), Root, FIN, PseudoSourceValue::getFixedStack(FI), 0); } -SDOperand -X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); @@ -1158,7 +1158,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op)); MachineFrameInfo *MFI = MF.getFrameInfo(); - SDOperand Root = Op.getOperand(0); + SDValue Root = Op.getOperand(0); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; unsigned CC = MF.getFunction()->getCallingConv(); bool Is64Bit = Subtarget->is64Bit(); @@ -1172,7 +1172,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs); CCInfo.AnalyzeFormalArguments(Op.Val, CCAssignFnForNode(Op)); - SmallVector<SDOperand, 8> ArgValues; + SmallVector<SDValue, 8> ArgValues; unsigned LastVal = ~0U; for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; @@ -1215,7 +1215,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { } unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC); - SDOperand ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); + SDValue ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT); // If this is an 8 or 16-bit value, it is really passed promoted to 32 // bits. Insert an assert[sz]ext to capture this, then truncate to the @@ -1259,7 +1259,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64)); FuncInfo->setSRetReturnReg(Reg); } - SDOperand Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]); + SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]); Root = DAG.getNode(ISD::TokenFactor, MVT::Other, Copy, Root); } @@ -1316,15 +1316,15 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { TotalNumXMMRegs * 16, 16); // Store the integer parameter registers. 
- SmallVector<SDOperand, 8> MemOps; - SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); - SDOperand FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, + SmallVector<SDValue, 8> MemOps; + SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); + SDValue FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN, DAG.getIntPtrConstant(VarArgsGPOffset)); for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) { unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs], X86::GR64RegisterClass); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i64); - SDOperand Store = + SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::i64); + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0); MemOps.push_back(Store); @@ -1338,8 +1338,8 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) { unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs], X86::VR128RegisterClass); - SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32); - SDOperand Store = + SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32); + SDValue Store = DAG.getStore(Val.getValue(1), Val, FIN, PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0); MemOps.push_back(Store); @@ -1387,14 +1387,14 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) { ArgValues.size()).getValue(Op.ResNo); } -SDOperand -X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG, - const SDOperand &StackPtr, +SDValue +X86TargetLowering::LowerMemOpCallTo(SDValue Op, SelectionDAG &DAG, + const SDValue &StackPtr, const CCValAssign &VA, - SDOperand Chain, - SDOperand Arg) { + SDValue Chain, + SDValue Arg) { unsigned LocMemOffset = VA.getLocMemOffset(); - SDOperand PtrOff = DAG.getIntPtrConstant(LocMemOffset); + SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))->getArgFlags(); @@ -1407,10 +1407,10 @@ X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG, /// EmitTailCallLoadRetAddr - Emit a load of return adress if tail call /// optimization is performed and it is required. -SDOperand +SDValue X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, - SDOperand &OutRetAddr, - SDOperand Chain, + SDValue &OutRetAddr, + SDValue Chain, bool IsTailCall, bool Is64Bit, int FPDiff) { @@ -1421,14 +1421,14 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, OutRetAddr = getReturnAddressFrameIndex(DAG); // Load the "old" Return address. OutRetAddr = DAG.getLoad(VT, Chain,OutRetAddr, NULL, 0); - return SDOperand(OutRetAddr.Val, 1); + return SDValue(OutRetAddr.Val, 1); } /// EmitTailCallStoreRetAddr - Emit a store of the return adress if tail call /// optimization is performed and it is required (FPDiff!=0). -static SDOperand +static SDValue EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, - SDOperand Chain, SDOperand RetAddrFrIdx, + SDValue Chain, SDValue RetAddrFrIdx, bool Is64Bit, int FPDiff) { // Store the return address to the appropriate stack slot. if (!FPDiff) return Chain; @@ -1437,20 +1437,20 @@ EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, int NewReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize); MVT VT = Is64Bit ? 
MVT::i64 : MVT::i32; - SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); + SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx, PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0); return Chain; } -SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); - SDOperand Chain = Op.getOperand(0); + SDValue Chain = Op.getOperand(0); unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0; bool IsTailCall = cast<ConstantSDNode>(Op.getOperand(3))->getValue() != 0 && CC == CallingConv::Fast && PerformTailCallOpt; - SDOperand Callee = Op.getOperand(4); + SDValue Callee = Op.getOperand(4); bool Is64Bit = Subtarget->is64Bit(); bool IsStructRet = CallIsStructReturn(Op); @@ -1489,20 +1489,20 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes)); - SDOperand RetAddrFrIdx; + SDValue RetAddrFrIdx; // Load return adress for tail calls. Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit, FPDiff); - SmallVector<std::pair<unsigned, SDOperand>, 8> RegsToPass; - SmallVector<SDOperand, 8> MemOpChains; - SDOperand StackPtr; + SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; + SmallVector<SDValue, 8> MemOpChains; + SDValue StackPtr; // Walk the register/memloc assignments, inserting copies/loads. In the case // of tail call optimization arguments are handle later. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; - SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); + SDValue Arg = Op.getOperand(5+2*VA.getValNo()); bool isByVal = cast<ARG_FLAGSSDNode>(Op.getOperand(6+2*VA.getValNo()))-> getArgFlags().isByVal(); @@ -1565,7 +1565,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { // Build a sequence of copy-to-reg nodes chained together with token chain // and flag operands which copy the outgoing args into registers. - SDOperand InFlag; + SDValue InFlag; // Tail call byval lowering might overwrite argument registers so in case of // tail call optimization the copies to registers are lowered later. if (!IsTailCall) @@ -1624,17 +1624,17 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { // For tail calls lower the arguments to the 'real' stack slot. if (IsTailCall) { - SmallVector<SDOperand, 8> MemOpChains2; - SDOperand FIN; + SmallVector<SDValue, 8> MemOpChains2; + SDValue FIN; int FI = 0; // Do not flag preceeding copytoreg stuff together with the following stuff. - InFlag = SDOperand(); + InFlag = SDValue(); for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; if (!VA.isRegLoc()) { assert(VA.isMemLoc()); - SDOperand Arg = Op.getOperand(5+2*VA.getValNo()); - SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo()); + SDValue Arg = Op.getOperand(5+2*VA.getValNo()); + SDValue FlagsOp = Op.getOperand(6+2*VA.getValNo()); ISD::ArgFlagsTy Flags = cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags(); // Create frame index. @@ -1645,7 +1645,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { if (Flags.isByVal()) { // Copy relative to framepointer. 
- SDOperand Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); + SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); if (StackPtr.Val == 0) StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy()); Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source); @@ -1671,7 +1671,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { InFlag); InFlag = Chain.getValue(1); } - InFlag =SDOperand(); + InFlag =SDValue(); // Store the return address to the appropriate stack slot. Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, @@ -1701,7 +1701,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { // Returns a chain & a flag for retval copy to use. SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; if (IsTailCall) { Ops.push_back(Chain); @@ -1748,7 +1748,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { Chain = DAG.getNode(X86ISD::TAILCALL, Op.Val->getVTList(), &Ops[0], Ops.size()); - return SDOperand(Chain.Val, Op.ResNo); + return SDValue(Chain.Val, Op.ResNo); } Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size()); @@ -1775,7 +1775,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) { // Handle result values, copying them out of physregs into vregs that we // return. - return SDOperand(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); + return SDValue(LowerCallResult(Chain, InFlag, Op.Val, CC, DAG), Op.ResNo); } @@ -1839,8 +1839,8 @@ unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, /// following the call is a return. A function is eligible if caller/callee /// calling conventions match, currently only fastcc supports tail calls, and /// the function CALL is immediatly followed by a RET. -bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, - SDOperand Ret, +bool X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Call, + SDValue Ret, SelectionDAG& DAG) const { if (!PerformTailCallOpt) return false; @@ -1850,7 +1850,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, unsigned CallerCC = MF.getFunction()->getCallingConv(); unsigned CalleeCC = cast<ConstantSDNode>(Call.getOperand(1))->getValue(); if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) { - SDOperand Callee = Call.getOperand(4); + SDValue Callee = Call.getOperand(4); // On x86/32Bit PIC/GOT tail calls are supported. if (getTargetMachine().getRelocationModel() != Reloc::PIC_ || !Subtarget->isPICStyleGOT()|| !Subtarget->is64Bit()) @@ -1872,7 +1872,7 @@ bool X86TargetLowering::IsEligibleForTailCallOptimization(SDOperand Call, //===----------------------------------------------------------------------===// -SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { +SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { MachineFunction &MF = DAG.getMachineFunction(); X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); int ReturnAddrIndex = FuncInfo->getRAIndex(); @@ -1897,7 +1897,7 @@ SDOperand X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) { /// translation. X86CC is the translated CondCode. LHS/RHS are modified as /// needed. 
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP, - unsigned &X86CC, SDOperand &LHS, SDOperand &RHS, + unsigned &X86CC, SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) { X86CC = X86::COND_INVALID; if (!isFP) { @@ -1989,7 +1989,7 @@ static bool hasFPCMov(unsigned X86CC) { /// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return /// true if Op is undef or if its value falls within the specified range (L, H]. -static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) { +static bool isUndefOrInRange(SDValue Op, unsigned Low, unsigned Hi) { if (Op.getOpcode() == ISD::UNDEF) return true; @@ -1999,7 +1999,7 @@ static bool isUndefOrInRange(SDOperand Op, unsigned Low, unsigned Hi) { /// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return /// true if Op is undef or if its value equal to the specified value. -static bool isUndefOrEqual(SDOperand Op, unsigned Val) { +static bool isUndefOrEqual(SDValue Op, unsigned Val) { if (Op.getOpcode() == ISD::UNDEF) return true; return cast<ConstantSDNode>(Op)->getValue() == Val; @@ -2015,7 +2015,7 @@ bool X86::isPSHUFDMask(SDNode *N) { // Check if the value doesn't reference the second vector. for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); if (cast<ConstantSDNode>(Arg)->getValue() >= e) @@ -2035,7 +2035,7 @@ bool X86::isPSHUFHWMask(SDNode *N) { // Lower quadword copied in order. for (unsigned i = 0; i != 4; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); if (cast<ConstantSDNode>(Arg)->getValue() != i) @@ -2044,7 +2044,7 @@ bool X86::isPSHUFHWMask(SDNode *N) { // Upper quadword shuffled. 
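The predicates being touched here (isPSHUFDMask, isPSHUFHWMask and their relatives) all walk a constant BUILD_VECTOR shuffle mask and test it against a fixed pattern. Below is a minimal stand-alone sketch of that idea using plain ints with -1 for undef; the names looksLikePSHUFHW and isUndefOrEqual are illustrative, and the exact bounds check on the high lanes is an assumption since the hunk elides part of that loop.

    #include <cstdio>
    #include <vector>

    // A mask element is a source index, or -1 standing in for ISD::UNDEF.
    static bool isUndefOrEqual(int Elt, int Val) {
      return Elt < 0 || Elt == Val;
    }

    // Rough shape of X86::isPSHUFHWMask above: the low four lanes must pass
    // through unchanged, the high four lanes may be permuted among themselves
    // (indices 4..7) or left undef.
    static bool looksLikePSHUFHW(const std::vector<int> &Mask) {
      if (Mask.size() != 8) return false;
      for (int i = 0; i != 4; ++i)
        if (!isUndefOrEqual(Mask[i], i)) return false;
      for (int i = 4; i != 8; ++i)
        if (Mask[i] >= 0 && (Mask[i] < 4 || Mask[i] > 7)) return false;
      return true;
    }

    int main() {
      std::printf("%d\n", looksLikePSHUFHW({0, 1, 2, 3, 7, 6, 5, 4})); // 1
      std::printf("%d\n", looksLikePSHUFHW({1, 0, 2, 3, 4, 5, 6, 7})); // 0: low half moved
      return 0;
    }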
for (unsigned i = 4; i != 8; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); @@ -2185,7 +2185,7 @@ bool X86::isMOVHPMask(SDNode *N) { return false; for (unsigned i = 0; i < NumElems/2; ++i) { - SDOperand Arg = N->getOperand(i + NumElems/2); + SDValue Arg = N->getOperand(i + NumElems/2); if (!isUndefOrEqual(Arg, i + NumElems)) return false; } @@ -2201,8 +2201,8 @@ bool static isUNPCKLMask(SDOperandPtr Elts, unsigned NumElts, return false; for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { - SDOperand BitI = Elts[i]; - SDOperand BitI1 = Elts[i+1]; + SDValue BitI = Elts[i]; + SDValue BitI1 = Elts[i+1]; if (!isUndefOrEqual(BitI, j)) return false; if (V2IsSplat) { @@ -2230,8 +2230,8 @@ bool static isUNPCKHMask(SDOperandPtr Elts, unsigned NumElts, return false; for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) { - SDOperand BitI = Elts[i]; - SDOperand BitI1 = Elts[i+1]; + SDValue BitI = Elts[i]; + SDValue BitI1 = Elts[i+1]; if (!isUndefOrEqual(BitI, j + NumElts/2)) return false; if (V2IsSplat) { @@ -2262,8 +2262,8 @@ bool X86::isUNPCKL_v_undef_Mask(SDNode *N) { return false; for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) { - SDOperand BitI = N->getOperand(i); - SDOperand BitI1 = N->getOperand(i+1); + SDValue BitI = N->getOperand(i); + SDValue BitI1 = N->getOperand(i+1); if (!isUndefOrEqual(BitI, j)) return false; @@ -2285,8 +2285,8 @@ bool X86::isUNPCKH_v_undef_Mask(SDNode *N) { return false; for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) { - SDOperand BitI = N->getOperand(i); - SDOperand BitI1 = N->getOperand(i + 1); + SDValue BitI = N->getOperand(i); + SDValue BitI1 = N->getOperand(i + 1); if (!isUndefOrEqual(BitI, j)) return false; @@ -2333,7 +2333,7 @@ static bool isCommutedMOVL(SDOperandPtr Ops, unsigned NumOps, return false; for (unsigned i = 1; i < NumOps; ++i) { - SDOperand Arg = Ops[i]; + SDValue Arg = Ops[i]; if (!(isUndefOrEqual(Arg, i+NumOps) || (V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) || (V2IsSplat && isUndefOrEqual(Arg, NumOps)))) @@ -2360,7 +2360,7 @@ bool X86::isMOVSHDUPMask(SDNode *N) { // Expect 1, 1, 3, 3 for (unsigned i = 0; i < 2; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); @@ -2369,7 +2369,7 @@ bool X86::isMOVSHDUPMask(SDNode *N) { bool HasHi = false; for (unsigned i = 2; i < 4; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); @@ -2391,7 +2391,7 @@ bool X86::isMOVSLDUPMask(SDNode *N) { // Expect 0, 0, 2, 2 for (unsigned i = 0; i < 2; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); @@ -2400,7 +2400,7 @@ bool X86::isMOVSLDUPMask(SDNode *N) { bool HasHi = false; for (unsigned i = 2; i < 4; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid 
VECTOR_SHUFFLE mask!"); unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); @@ -2430,10 +2430,10 @@ static bool isSplatMask(SDNode *N) { // This is a splat operation if each element of the permute is the same, and // if the value doesn't reference the second vector. unsigned NumElems = N->getNumOperands(); - SDOperand ElementBase; + SDValue ElementBase; unsigned i = 0; for (; i != NumElems; ++i) { - SDOperand Elt = N->getOperand(i); + SDValue Elt = N->getOperand(i); if (isa<ConstantSDNode>(Elt)) { ElementBase = Elt; break; @@ -2444,7 +2444,7 @@ static bool isSplatMask(SDNode *N) { return false; for (; i != NumElems; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); if (Arg != ElementBase) return false; @@ -2485,7 +2485,7 @@ unsigned X86::getShuffleSHUFImmediate(SDNode *N) { unsigned Mask = 0; for (unsigned i = 0; i < NumOperands; ++i) { unsigned Val = 0; - SDOperand Arg = N->getOperand(NumOperands-i-1); + SDValue Arg = N->getOperand(NumOperands-i-1); if (Arg.getOpcode() != ISD::UNDEF) Val = cast<ConstantSDNode>(Arg)->getValue(); if (Val >= NumOperands) Val -= NumOperands; @@ -2505,7 +2505,7 @@ unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) { // 8 nodes, but we only care about the last 4. for (unsigned i = 7; i >= 4; --i) { unsigned Val = 0; - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() != ISD::UNDEF) Val = cast<ConstantSDNode>(Arg)->getValue(); Mask |= (Val - 4); @@ -2524,7 +2524,7 @@ unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) { // 8 nodes, but we only care about the first 4. for (int i = 3; i >= 0; --i) { unsigned Val = 0; - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() != ISD::UNDEF) Val = cast<ConstantSDNode>(Arg)->getValue(); Mask |= Val; @@ -2546,7 +2546,7 @@ static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { // Lower quadword shuffled. for (unsigned i = 0; i != 4; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); @@ -2556,7 +2556,7 @@ static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { // Upper quadword shuffled. for (unsigned i = 4; i != 8; ++i) { - SDOperand Arg = N->getOperand(i); + SDValue Arg = N->getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!"); unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); @@ -2569,17 +2569,17 @@ static bool isPSHUFHW_PSHUFLWMask(SDNode *N) { /// CommuteVectorShuffle - Swap vector_shuffle operands as well as /// values in ther permute mask. 
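CommuteVectorShuffle, whose body follows, swaps V1 and V2 and rewrites the mask so it still names the same elements: indices into the first vector (0..N-1) must now name the second one (N..2N-1) and vice versa. A stand-alone sketch of just that mask adjustment (commuteMask is an illustrative name; -1 models an undef lane):

    #include <cstdio>
    #include <vector>

    // Mirror of the index flip done per mask element in CommuteVectorShuffle.
    static std::vector<int> commuteMask(const std::vector<int> &Mask) {
      int N = static_cast<int>(Mask.size());
      std::vector<int> Out;
      for (int Elt : Mask)
        Out.push_back(Elt < 0 ? -1 : (Elt < N ? Elt + N : Elt - N));
      return Out;
    }

    int main() {
      for (int i : commuteMask({0, 5, 2, 7})) std::printf("%d ", i); // 4 1 6 3
      std::printf("\n");
      return 0;
    }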
-static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, - SDOperand &V2, SDOperand &Mask, +static SDValue CommuteVectorShuffle(SDValue Op, SDValue &V1, + SDValue &V2, SDValue &Mask, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT MaskVT = Mask.getValueType(); MVT EltVT = MaskVT.getVectorElementType(); unsigned NumElems = Mask.getNumOperands(); - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i != NumElems; ++i) { - SDOperand Arg = Mask.getOperand(i); + SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) { MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); continue; @@ -2600,13 +2600,13 @@ static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1, /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming /// the two vector operands have swapped position. static -SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) { +SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG) { MVT MaskVT = Mask.getValueType(); MVT EltVT = MaskVT.getVectorElementType(); unsigned NumElems = Mask.getNumOperands(); - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i != NumElems; ++i) { - SDOperand Arg = Mask.getOperand(i); + SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) { MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT)); continue; @@ -2685,7 +2685,7 @@ static bool isSplatVector(SDNode *N) { if (N->getOpcode() != ISD::BUILD_VECTOR) return false; - SDOperand SplatValue = N->getOperand(0); + SDValue SplatValue = N->getOperand(0); for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) if (N->getOperand(i) != SplatValue) return false; @@ -2698,12 +2698,12 @@ static bool isUndefShuffle(SDNode *N) { if (N->getOpcode() != ISD::VECTOR_SHUFFLE) return false; - SDOperand V1 = N->getOperand(0); - SDOperand V2 = N->getOperand(1); - SDOperand Mask = N->getOperand(2); + SDValue V1 = N->getOperand(0); + SDValue V2 = N->getOperand(1); + SDValue Mask = N->getOperand(2); unsigned NumElems = Mask.getNumOperands(); for (unsigned i = 0; i != NumElems; ++i) { - SDOperand Arg = Mask.getOperand(i); + SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() != ISD::UNDEF) { unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); if (Val < NumElems && V1.getOpcode() != ISD::UNDEF) @@ -2717,7 +2717,7 @@ static bool isUndefShuffle(SDNode *N) { /// isZeroNode - Returns true if Elt is a constant zero or a floating point /// constant +0.0. -static inline bool isZeroNode(SDOperand Elt) { +static inline bool isZeroNode(SDValue Elt) { return ((isa<ConstantSDNode>(Elt) && cast<ConstantSDNode>(Elt)->getValue() == 0) || (isa<ConstantFPSDNode>(Elt) && @@ -2730,12 +2730,12 @@ static bool isZeroShuffle(SDNode *N) { if (N->getOpcode() != ISD::VECTOR_SHUFFLE) return false; - SDOperand V1 = N->getOperand(0); - SDOperand V2 = N->getOperand(1); - SDOperand Mask = N->getOperand(2); + SDValue V1 = N->getOperand(0); + SDValue V2 = N->getOperand(1); + SDValue Mask = N->getOperand(2); unsigned NumElems = Mask.getNumOperands(); for (unsigned i = 0; i != NumElems; ++i) { - SDOperand Arg = Mask.getOperand(i); + SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() == ISD::UNDEF) continue; @@ -2761,20 +2761,20 @@ static bool isZeroShuffle(SDNode *N) { /// getZeroVector - Returns a vector of specified type with all zero elements. 
/// -static SDOperand getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) { +static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) { assert(VT.isVector() && "Expected a vector type"); // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest // type. This ensures they get CSE'd. - SDOperand Vec; + SDValue Vec; if (VT.getSizeInBits() == 64) { // MMX - SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); + SDValue Cst = DAG.getTargetConstant(0, MVT::i32); Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); } else if (HasSSE2) { // SSE2 - SDOperand Cst = DAG.getTargetConstant(0, MVT::i32); + SDValue Cst = DAG.getTargetConstant(0, MVT::i32); Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst); } else { // SSE1 - SDOperand Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); + SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4f32, Cst, Cst, Cst, Cst); } return DAG.getNode(ISD::BIT_CONVERT, VT, Vec); @@ -2782,13 +2782,13 @@ static SDOperand getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) { /// getOnesVector - Returns a vector of specified type with all bits set. /// -static SDOperand getOnesVector(MVT VT, SelectionDAG &DAG) { +static SDValue getOnesVector(MVT VT, SelectionDAG &DAG) { assert(VT.isVector() && "Expected a vector type"); // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest // type. This ensures they get CSE'd. - SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32); - SDOperand Vec; + SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); + SDValue Vec; if (VT.getSizeInBits() == 64) // MMX Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst); else // SSE @@ -2799,14 +2799,14 @@ static SDOperand getOnesVector(MVT VT, SelectionDAG &DAG) { /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements /// that point to V2 points to its first element. -static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { +static SDValue NormalizeMask(SDValue Mask, SelectionDAG &DAG) { assert(Mask.getOpcode() == ISD::BUILD_VECTOR); bool Changed = false; - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; unsigned NumElems = Mask.getNumOperands(); for (unsigned i = 0; i != NumElems; ++i) { - SDOperand Arg = Mask.getOperand(i); + SDValue Arg = Mask.getOperand(i); if (Arg.getOpcode() != ISD::UNDEF) { unsigned Val = cast<ConstantSDNode>(Arg)->getValue(); if (Val > NumElems) { @@ -2825,11 +2825,11 @@ static SDOperand NormalizeMask(SDOperand Mask, SelectionDAG &DAG) { /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd /// operation of specified width. -static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { +static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; MaskVec.push_back(DAG.getConstant(NumElems, BaseVT)); for (unsigned i = 1; i != NumElems; ++i) MaskVec.push_back(DAG.getConstant(i, BaseVT)); @@ -2838,10 +2838,10 @@ static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) { /// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation /// of specified width. 
-static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { +static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0, e = NumElems/2; i != e; ++i) { MaskVec.push_back(DAG.getConstant(i, BaseVT)); MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT)); @@ -2851,11 +2851,11 @@ static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) { /// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation /// of specified width. -static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { +static SDValue getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); unsigned Half = NumElems/2; - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i != Half; ++i) { MaskVec.push_back(DAG.getConstant(i + Half, BaseVT)); MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT)); @@ -2866,11 +2866,11 @@ static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) { /// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps /// element #0 of a vector with the specified index, leaving the rest of the /// elements in place. -static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, +static SDValue getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, SelectionDAG &DAG) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT BaseVT = MaskVT.getVectorElementType(); - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; // Element #0 of the result gets the elt we are replacing. MaskVec.push_back(DAG.getConstant(DestElt, BaseVT)); for (unsigned i = 1; i != NumElems; ++i) @@ -2879,13 +2879,13 @@ static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt, } /// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32. -static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) { +static SDValue PromoteSplat(SDValue Op, SelectionDAG &DAG, bool HasSSE2) { MVT PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32; MVT VT = Op.getValueType(); if (PVT == VT) return Op; - SDOperand V1 = Op.getOperand(0); - SDOperand Mask = Op.getOperand(2); + SDValue V1 = Op.getOperand(0); + SDValue Mask = Op.getOperand(2); unsigned NumElems = Mask.getNumOperands(); // Special handling of v4f32 -> v4i32. if (VT != MVT::v4f32) { @@ -2898,7 +2898,7 @@ static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) { } V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1); - SDOperand Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1, + SDValue Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1, DAG.getNode(ISD::UNDEF, PVT), Mask); return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle); } @@ -2907,22 +2907,22 @@ static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) { /// vector of zero or undef vector. This produces a shuffle where the low /// element of V2 is swizzled into the zero/undef vector, landing at element /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3). 
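These helpers build fixed shuffle masks as BUILD_VECTOR nodes of constants; the 4,1,2,3 mask mentioned just above is the NumElems == 4 case of getMOVLMask. Here is a stand-alone sketch of two of them using plain integer vectors (movlMask and unpacklMask are illustrative names, not LLVM APIs):

    #include <cstdio>
    #include <vector>

    // Plain-integer versions of the mask builders above; in the real code each
    // index becomes a ConstantSDNode inside a BUILD_VECTOR node.
    static std::vector<unsigned> movlMask(unsigned N) {          // cf. getMOVLMask
      std::vector<unsigned> M{N};                                // element 0 comes from V2
      for (unsigned i = 1; i != N; ++i) M.push_back(i);          // the rest stay from V1
      return M;
    }

    static std::vector<unsigned> unpacklMask(unsigned N) {       // cf. getUnpacklMask
      std::vector<unsigned> M;
      for (unsigned i = 0; i != N / 2; ++i) {
        M.push_back(i);                                          // low half of V1
        M.push_back(i + N);                                      // low half of V2
      }
      return M;
    }

    int main() {
      for (unsigned i : movlMask(4))    std::printf("%u ", i);   // 4 1 2 3
      std::printf("\n");
      for (unsigned i : unpacklMask(4)) std::printf("%u ", i);   // 0 4 1 5
      std::printf("\n");
      return 0;
    }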
-static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx, +static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, bool isZero, bool HasSSE2, SelectionDAG &DAG) { MVT VT = V2.getValueType(); - SDOperand V1 = isZero + SDValue V1 = isZero ? getZeroVector(VT, HasSSE2, DAG) : DAG.getNode(ISD::UNDEF, VT); unsigned NumElems = V2.getValueType().getVectorNumElements(); MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT EVT = MaskVT.getVectorElementType(); - SmallVector<SDOperand, 16> MaskVec; + SmallVector<SDValue, 16> MaskVec; for (unsigned i = 0; i != NumElems; ++i) if (i == Idx) // If this is the insertion idx, put the low elt of V2 here. MaskVec.push_back(DAG.getConstant(NumElems, EVT)); else MaskVec.push_back(DAG.getConstant(i, EVT)); - SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask); } @@ -2930,18 +2930,18 @@ static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx, /// getNumOfConsecutiveZeros - Return the number of elements in a result of /// a shuffle that is zero. static -unsigned getNumOfConsecutiveZeros(SDOperand Op, SDOperand Mask, +unsigned getNumOfConsecutiveZeros(SDValue Op, SDValue Mask, unsigned NumElems, bool Low, SelectionDAG &DAG) { unsigned NumZeros = 0; for (unsigned i = 0; i < NumElems; ++i) { unsigned Index = Low ? i : NumElems-i-1; - SDOperand Idx = Mask.getOperand(Index); + SDValue Idx = Mask.getOperand(Index); if (Idx.getOpcode() == ISD::UNDEF) { ++NumZeros; continue; } - SDOperand Elt = DAG.getShuffleScalarElt(Op.Val, Index); + SDValue Elt = DAG.getShuffleScalarElt(Op.Val, Index); if (Elt.Val && isZeroNode(Elt)) ++NumZeros; else @@ -2952,8 +2952,8 @@ unsigned getNumOfConsecutiveZeros(SDOperand Op, SDOperand Mask, /// isVectorShift - Returns true if the shuffle can be implemented as a /// logical left or right shift of a vector. -static bool isVectorShift(SDOperand Op, SDOperand Mask, SelectionDAG &DAG, - bool &isLeft, SDOperand &ShVal, unsigned &ShAmt) { +static bool isVectorShift(SDValue Op, SDValue Mask, SelectionDAG &DAG, + bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { unsigned NumElems = Mask.getNumOperands(); isLeft = true; @@ -2969,7 +2969,7 @@ static bool isVectorShift(SDOperand Op, SDOperand Mask, SelectionDAG &DAG, bool SeenV2 = false; for (unsigned i = NumZeros; i < NumElems; ++i) { unsigned Val = isLeft ? (i - NumZeros) : i; - SDOperand Idx = Mask.getOperand(isLeft ? i : (i - NumZeros)); + SDValue Idx = Mask.getOperand(isLeft ? i : (i - NumZeros)); if (Idx.getOpcode() == ISD::UNDEF) continue; unsigned Index = cast<ConstantSDNode>(Idx)->getValue(); @@ -2993,13 +2993,13 @@ static bool isVectorShift(SDOperand Op, SDOperand Mask, SelectionDAG &DAG, /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. 
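isVectorShift above checks whether a shuffle mask is a run of known-zero lanes followed by consecutive source lanes, in which case the whole operation can be emitted as a single vector shift. Below is a simplified, single-operand sketch of that mask-shape test; isLeftShiftMask is an illustrative name, and whether the result would map to PSLLDQ or PSRLDQ depends on lane ordering, which is ignored here.

    #include <cstdio>
    #include <vector>

    // -1 stands for a lane known to be zero (or undef). The mask is a shift by
    // ShAmt lanes if the remaining entries are one consecutive run.
    static bool isLeftShiftMask(const std::vector<int> &Mask, unsigned &ShAmt) {
      unsigned N = Mask.size(), NumZeros = 0;
      while (NumZeros < N && Mask[NumZeros] < 0) ++NumZeros;     // count zero lanes
      if (NumZeros == 0 || NumZeros == N) return false;
      for (unsigned i = NumZeros; i < N; ++i)
        if (Mask[i] >= 0 && static_cast<unsigned>(Mask[i]) != i - NumZeros)
          return false;                                          // not a consecutive run
      ShAmt = NumZeros;
      return true;
    }

    int main() {
      unsigned ShAmt = 0;
      std::printf("%d %u\n", isLeftShiftMask({-1, -1, 0, 1}, ShAmt), ShAmt); // 1 2
      return 0;
    }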
/// -static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, +static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, unsigned NumNonZero, unsigned NumZero, SelectionDAG &DAG, TargetLowering &TLI) { if (NumNonZero > 8) - return SDOperand(); + return SDValue(); - SDOperand V(0, 0); + SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 16; ++i) { bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; @@ -3012,7 +3012,7 @@ static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, } if ((i & 1) != 0) { - SDOperand ThisElt(0, 0), LastElt(0, 0); + SDValue ThisElt(0, 0), LastElt(0, 0); bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0; if (LastIsNonZero) { LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1)); @@ -3037,13 +3037,13 @@ static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros, /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. /// -static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, +static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, unsigned NumNonZero, unsigned NumZero, SelectionDAG &DAG, TargetLowering &TLI) { if (NumNonZero > 4) - return SDOperand(); + return SDValue(); - SDOperand V(0, 0); + SDValue V(0, 0); bool First = true; for (unsigned i = 0; i < 8; ++i) { bool isNonZero = (NonZeros & (1 << i)) != 0; @@ -3065,7 +3065,7 @@ static SDOperand LowerBuildVectorv8i16(SDOperand Op, unsigned NonZeros, /// getVShift - Return a vector logical shift node. /// -static SDOperand getVShift(bool isLeft, MVT VT, SDOperand SrcOp, +static SDValue getVShift(bool isLeft, MVT VT, SDValue SrcOp, unsigned NumBits, SelectionDAG &DAG, const TargetLowering &TLI) { bool isMMX = VT.getSizeInBits() == 64; @@ -3077,8 +3077,8 @@ static SDOperand getVShift(bool isLeft, MVT VT, SDOperand SrcOp, DAG.getConstant(NumBits, TLI.getShiftAmountTy()))); } -SDOperand -X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) { // All zero's are handled with pxor, all one's are handled with pcmpeqd. if (ISD::isBuildVectorAllZeros(Op.Val) || ISD::isBuildVectorAllOnes(Op.Val)) { // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to @@ -3101,9 +3101,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { unsigned NumNonZero = 0; unsigned NonZeros = 0; bool IsAllConstants = true; - SmallSet<SDOperand, 8> Values; + SmallSet<SDValue, 8> Values; for (unsigned i = 0; i < NumElems; ++i) { - SDOperand Elt = Op.getOperand(i); + SDValue Elt = Op.getOperand(i); if (Elt.getOpcode() == ISD::UNDEF) continue; Values.insert(Elt); @@ -3126,7 +3126,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Special case for single non-zero, non-undef, element. if (NumNonZero == 1 && NumElems <= 4) { unsigned Idx = CountTrailingZeros_32(NonZeros); - SDOperand Item = Op.getOperand(Idx); + SDValue Item = Op.getOperand(Idx); // If this is an insertion of an i64 value on x86-32, and if the top bits of // the value are obviously zero, truncate the value to i32 and do the @@ -3150,7 +3150,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Now we have our 32-bit value zero extended in the low element of // a vector. If Idx != 0, swizzle it into place. 
if (Idx != 0) { - SDOperand Ops[] = { + SDValue Ops[] = { Item, DAG.getNode(ISD::UNDEF, Item.getValueType()), getSwapEltZeroMask(VecElts, Idx, DAG) }; @@ -3184,7 +3184,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { } if (IsAllConstants) // Otherwise, it's better to do a constpool load. - return SDOperand(); + return SDValue(); // Otherwise, if this is a vector with i32 or f32 elements, and the element // is a non-constant being inserted into an element other than the low one, @@ -3199,10 +3199,10 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { Subtarget->hasSSE2(), DAG); MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT MaskEVT = MaskVT.getVectorElementType(); - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i < NumElems; i++) MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT)); - SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item, DAG.getNode(ISD::UNDEF, VT), Mask); @@ -3211,41 +3211,41 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Splat is obviously ok. Let legalizer expand it to a shuffle. if (Values.size() == 1) - return SDOperand(); + return SDValue(); // A vector full of immediates; various special cases are already // handled, so this is best done with a single constant-pool load. if (IsAllConstants) - return SDOperand(); + return SDValue(); // Let legalizer expand 2-wide build_vectors. if (EVTBits == 64) { if (NumNonZero == 1) { // One half is zero or undef. unsigned Idx = CountTrailingZeros_32(NonZeros); - SDOperand V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, + SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(Idx)); return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget->hasSSE2(), DAG); } - return SDOperand(); + return SDValue(); } // If element VT is < 32 bits, convert it to inserts into a zero vector. if (EVTBits == 8 && NumElems == 16) { - SDOperand V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, + SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG, *this); if (V.Val) return V; } if (EVTBits == 16 && NumElems == 8) { - SDOperand V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, + SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, *this); if (V.Val) return V; } // If element VT is == 32 bits, turn it into a number of shuffles. 
- SmallVector<SDOperand, 8> V; + SmallVector<SDValue, 8> V; V.resize(NumElems); if (NumElems == 4 && NumZero > 0) { for (unsigned i = 0; i < 4; ++i) { @@ -3279,7 +3279,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems); MVT EVT = MaskVT.getVectorElementType(); - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; bool Reverse = (NonZeros & 0x3) == 2; for (unsigned i = 0; i < 2; ++i) if (Reverse) @@ -3292,7 +3292,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT)); else MaskVec.push_back(DAG.getConstant(i+NumElems, EVT)); - SDOperand ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size()); return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask); } @@ -3303,7 +3303,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> - SDOperand UnpckMask = getUnpacklMask(NumElems, DAG); + SDValue UnpckMask = getUnpacklMask(NumElems, DAG); for (unsigned i = 0; i < NumElems; ++i) V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i)); NumElems >>= 1; @@ -3316,24 +3316,24 @@ X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) { return V[0]; } - return SDOperand(); + return SDValue(); } static -SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, - SDOperand PermMask, SelectionDAG &DAG, +SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2, + SDValue PermMask, SelectionDAG &DAG, TargetLowering &TLI) { - SDOperand NewV; + SDValue NewV; MVT MaskVT = MVT::getIntVectorWithNumElements(8); MVT MaskEVT = MaskVT.getVectorElementType(); MVT PtrVT = TLI.getPointerTy(); - SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(), + SmallVector<SDValue, 8> MaskElts(PermMask.Val->op_begin(), PermMask.Val->op_end()); // First record which half of which vector the low elements come from. SmallVector<unsigned, 4> LowQuad(4); for (unsigned i = 0; i < 4; ++i) { - SDOperand Elt = MaskElts[i]; + SDValue Elt = MaskElts[i]; if (Elt.getOpcode() == ISD::UNDEF) continue; unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); @@ -3352,7 +3352,7 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, // Record which half of which vector the high elements come from. SmallVector<unsigned, 4> HighQuad(4); for (unsigned i = 4; i < 8; ++i) { - SDOperand Elt = MaskElts[i]; + SDValue Elt = MaskElts[i]; if (Elt.getOpcode() == ISD::UNDEF) continue; unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); @@ -3371,7 +3371,7 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, // If it's possible to sort parts of either half with PSHUF{H|L}W, then do it. if (BestLowQuad != -1 || BestHighQuad != -1) { // First sort the 4 chunks in order using shufpd. 
- SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; if (BestLowQuad != -1) MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32)); else @@ -3380,7 +3380,7 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32)); else MaskVec.push_back(DAG.getConstant(1, MVT::i32)); - SDOperand Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); + SDValue Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2); NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64, DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1), DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask); @@ -3393,7 +3393,7 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, MaskVec.clear(); bool AnyOutOrder = false; for (unsigned i = 0; i != 4; ++i) { - SDOperand Elt = MaskElts[i]; + SDValue Elt = MaskElts[i]; if (Elt.getOpcode() == ISD::UNDEF) { MaskVec.push_back(Elt); InOrder.set(i); @@ -3411,7 +3411,7 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, if (AnyOutOrder) { for (unsigned i = 4; i != 8; ++i) MaskVec.push_back(DAG.getConstant(i, MaskEVT)); - SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); } } @@ -3423,7 +3423,7 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, MaskVec.push_back(DAG.getConstant(i, MaskEVT)); bool AnyOutOrder = false; for (unsigned i = 4; i != 8; ++i) { - SDOperand Elt = MaskElts[i]; + SDValue Elt = MaskElts[i]; if (Elt.getOpcode() == ISD::UNDEF) { MaskVec.push_back(Elt); InOrder.set(i); @@ -3439,7 +3439,7 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, } } if (AnyOutOrder) { - SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask); } } @@ -3448,9 +3448,9 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, for (unsigned i = 0; i != 8; ++i) { if (InOrder[i]) continue; - SDOperand Elt = MaskElts[i]; + SDValue Elt = MaskElts[i]; unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); - SDOperand ExtOp = (EltIdx < 8) + SDValue ExtOp = (EltIdx < 8) ? 
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, DAG.getConstant(EltIdx, PtrVT)) : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, @@ -3468,10 +3468,10 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, unsigned V1FromV1 = 0; unsigned V2InOrder = 0; unsigned V2FromV2 = 0; - SmallVector<SDOperand, 8> V1Elts; - SmallVector<SDOperand, 8> V2Elts; + SmallVector<SDValue, 8> V1Elts; + SmallVector<SDValue, 8> V2Elts; for (unsigned i = 0; i < 8; ++i) { - SDOperand Elt = MaskElts[i]; + SDValue Elt = MaskElts[i]; if (Elt.getOpcode() == ISD::UNDEF) { V1Elts.push_back(Elt); V2Elts.push_back(Elt); @@ -3509,9 +3509,9 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, if (V1FromV1) { // If there are elements that are from V1 but out of place, // then first sort them in place - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i < 8; ++i) { - SDOperand Elt = V1Elts[i]; + SDValue Elt = V1Elts[i]; if (Elt.getOpcode() == ISD::UNDEF) { MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); continue; @@ -3522,19 +3522,19 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, else MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT)); } - SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8); V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask); } NewV = V1; for (unsigned i = 0; i < 8; ++i) { - SDOperand Elt = V1Elts[i]; + SDValue Elt = V1Elts[i]; if (Elt.getOpcode() == ISD::UNDEF) continue; unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); if (EltIdx < 8) continue; - SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, + SDValue ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2, DAG.getConstant(EltIdx - 8, PtrVT)); NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, DAG.getConstant(i, PtrVT)); @@ -3544,11 +3544,11 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, // All elements are from V1. NewV = V1; for (unsigned i = 0; i < 8; ++i) { - SDOperand Elt = V1Elts[i]; + SDValue Elt = V1Elts[i]; if (Elt.getOpcode() == ISD::UNDEF) continue; unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); - SDOperand ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, + SDValue ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1, DAG.getConstant(EltIdx, PtrVT)); NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp, DAG.getConstant(i, PtrVT)); @@ -3563,9 +3563,9 @@ SDOperand LowerVECTOR_SHUFFLEv8i16(SDOperand V1, SDOperand V2, /// the right sequence. e.g. /// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15> static -SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, +SDValue RewriteAsNarrowerShuffle(SDValue V1, SDValue V2, MVT VT, - SDOperand PermMask, SelectionDAG &DAG, + SDValue PermMask, SelectionDAG &DAG, TargetLowering &TLI) { unsigned NumElems = PermMask.getNumOperands(); unsigned NewWidth = (NumElems == 4) ? 
2 : 4; @@ -3587,18 +3587,18 @@ SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, NewVT = MVT::v2f64; } unsigned Scale = NumElems / NewWidth; - SmallVector<SDOperand, 8> MaskVec; + SmallVector<SDValue, 8> MaskVec; for (unsigned i = 0; i < NumElems; i += Scale) { unsigned StartIdx = ~0U; for (unsigned j = 0; j < Scale; ++j) { - SDOperand Elt = PermMask.getOperand(i+j); + SDValue Elt = PermMask.getOperand(i+j); if (Elt.getOpcode() == ISD::UNDEF) continue; unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue(); if (StartIdx == ~0U) StartIdx = EltIdx - (EltIdx % Scale); if (EltIdx != StartIdx + j) - return SDOperand(); + return SDValue(); } if (StartIdx == ~0U) MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEltVT)); @@ -3615,8 +3615,8 @@ SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2, /// getVZextMovL - Return a zero-extending vector move low node. /// -static SDOperand getVZextMovL(MVT VT, MVT OpVT, - SDOperand SrcOp, SelectionDAG &DAG, +static SDValue getVZextMovL(MVT VT, MVT OpVT, + SDValue SrcOp, SelectionDAG &DAG, const X86Subtarget *Subtarget) { if (VT == MVT::v2f64 || VT == MVT::v4f32) { LoadSDNode *LD = NULL; @@ -3647,18 +3647,18 @@ static SDOperand getVZextMovL(MVT VT, MVT OpVT, /// LowerVECTOR_SHUFFLE_4wide - Handle all 4 wide cases with a number of /// shuffles. -static SDOperand -LowerVECTOR_SHUFFLE_4wide(SDOperand V1, SDOperand V2, - SDOperand PermMask, MVT VT, SelectionDAG &DAG) { +static SDValue +LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2, + SDValue PermMask, MVT VT, SelectionDAG &DAG) { MVT MaskVT = PermMask.getValueType(); MVT MaskEVT = MaskVT.getVectorElementType(); SmallVector<std::pair<int, int>, 8> Locs; Locs.reserve(4); - SmallVector<SDOperand, 8> Mask1(4, DAG.getNode(ISD::UNDEF, MaskEVT)); + SmallVector<SDValue, 8> Mask1(4, DAG.getNode(ISD::UNDEF, MaskEVT)); unsigned NumHi = 0; unsigned NumLo = 0; for (unsigned i = 0; i != 4; ++i) { - SDOperand Elt = PermMask.getOperand(i); + SDValue Elt = PermMask.getOperand(i); if (Elt.getOpcode() == ISD::UNDEF) { Locs[i] = std::make_pair(-1, -1); } else { @@ -3685,7 +3685,7 @@ LowerVECTOR_SHUFFLE_4wide(SDOperand V1, SDOperand V2, DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &Mask1[0], Mask1.size())); - SmallVector<SDOperand, 8> Mask2(4, DAG.getNode(ISD::UNDEF, MaskEVT)); + SmallVector<SDValue, 8> Mask2(4, DAG.getNode(ISD::UNDEF, MaskEVT)); for (unsigned i = 0; i != 4; ++i) { if (Locs[i].first == -1) continue; @@ -3716,7 +3716,7 @@ LowerVECTOR_SHUFFLE_4wide(SDOperand V1, SDOperand V2, // Find the element from V2. unsigned HiIndex; for (HiIndex = 0; HiIndex < 3; ++HiIndex) { - SDOperand Elt = PermMask.getOperand(HiIndex); + SDValue Elt = PermMask.getOperand(HiIndex); if (Elt.getOpcode() == ISD::UNDEF) continue; unsigned Val = cast<ConstantSDNode>(Elt)->getValue(); @@ -3756,9 +3756,9 @@ LowerVECTOR_SHUFFLE_4wide(SDOperand V1, SDOperand V2, // Break it into (shuffle shuffle_hi, shuffle_lo). 
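RewriteAsNarrowerShuffle, shown above, accepts a wide shuffle only if every aligned group of Scale mask entries copies one aligned, consecutive group of source elements, and then emits a mask that is Scale times narrower. A stand-alone sketch of that check (narrowMask is an illustrative name; -1 models undef and an empty result means the rewrite is not possible):

    #include <cstdio>
    #include <vector>

    static std::vector<int> narrowMask(const std::vector<int> &Mask, int Scale) {
      std::vector<int> Out;
      for (unsigned i = 0; i < Mask.size(); i += Scale) {
        int Start = -1;                                // plays the role of ~0U above
        for (int j = 0; j < Scale; ++j) {
          int Elt = Mask[i + j];
          if (Elt < 0) continue;                       // undef lane, anything goes
          if (Start < 0) Start = Elt - (Elt % Scale);  // group-aligned base index
          if (Elt != Start + j) return {};             // not one consecutive group
        }
        Out.push_back(Start < 0 ? -1 : Start / Scale);
      }
      return Out;
    }

    int main() {
      // An 8-wide mask whose pairs each copy an aligned pair becomes 4-wide.
      std::vector<int> Narrow = narrowMask({2, 3, 14, 15, 0, 1, 10, 11}, 2);
      for (int i : Narrow) std::printf("%d ", i);      // 1 7 0 5
      std::printf("\n");
      return 0;
    }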
Locs.clear(); - SmallVector<SDOperand,8> LoMask(4, DAG.getNode(ISD::UNDEF, MaskEVT)); - SmallVector<SDOperand,8> HiMask(4, DAG.getNode(ISD::UNDEF, MaskEVT)); - SmallVector<SDOperand,8> *MaskPtr = &LoMask; + SmallVector<SDValue,8> LoMask(4, DAG.getNode(ISD::UNDEF, MaskEVT)); + SmallVector<SDValue,8> HiMask(4, DAG.getNode(ISD::UNDEF, MaskEVT)); + SmallVector<SDValue,8> *MaskPtr = &LoMask; unsigned MaskIdx = 0; unsigned LoIdx = 0; unsigned HiIdx = 2; @@ -3769,7 +3769,7 @@ LowerVECTOR_SHUFFLE_4wide(SDOperand V1, SDOperand V2, LoIdx = 0; HiIdx = 2; } - SDOperand Elt = PermMask.getOperand(i); + SDValue Elt = PermMask.getOperand(i); if (Elt.getOpcode() == ISD::UNDEF) { Locs[i] = std::make_pair(-1, -1); } else if (cast<ConstantSDNode>(Elt)->getValue() < 4) { @@ -3783,13 +3783,13 @@ LowerVECTOR_SHUFFLE_4wide(SDOperand V1, SDOperand V2, } } - SDOperand LoShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, + SDValue LoShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &LoMask[0], LoMask.size())); - SDOperand HiShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, + SDValue HiShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &HiMask[0], HiMask.size())); - SmallVector<SDOperand, 8> MaskOps; + SmallVector<SDValue, 8> MaskOps; for (unsigned i = 0; i != 4; ++i) { if (Locs[i].first == -1) { MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT)); @@ -3803,11 +3803,11 @@ LowerVECTOR_SHUFFLE_4wide(SDOperand V1, SDOperand V2, &MaskOps[0], MaskOps.size())); } -SDOperand -X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { - SDOperand V1 = Op.getOperand(0); - SDOperand V2 = Op.getOperand(1); - SDOperand PermMask = Op.getOperand(2); +SDValue +X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + SDValue PermMask = Op.getOperand(2); MVT VT = Op.getValueType(); unsigned NumElems = PermMask.getNumOperands(); bool isMMX = VT.getSizeInBits() == 64; @@ -3836,26 +3836,26 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // If the shuffle can be profitably rewritten as a narrower shuffle, then // do it! if (VT == MVT::v8i16 || VT == MVT::v16i8) { - SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); + SDValue NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); if (NewOp.Val) return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG)); } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { // FIXME: Figure out a cleaner way to do this. // Try to make use of movq to zero out the top part. 
if (ISD::isBuildVectorAllZeros(V2.Val)) { - SDOperand NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, + SDValue NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); if (NewOp.Val) { - SDOperand NewV1 = NewOp.getOperand(0); - SDOperand NewV2 = NewOp.getOperand(1); - SDOperand NewMask = NewOp.getOperand(2); + SDValue NewV1 = NewOp.getOperand(0); + SDValue NewV2 = NewOp.getOperand(1); + SDValue NewMask = NewOp.getOperand(2); if (isCommutedMOVL(NewMask.Val, true, false)) { NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG); return getVZextMovL(VT, NewOp.getValueType(), NewV2, DAG, Subtarget); } } } else if (ISD::isBuildVectorAllZeros(V1.Val)) { - SDOperand NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, + SDValue NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this); if (NewOp.Val && X86::isMOVLMask(NewOp.getOperand(2).Val)) return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1), @@ -3866,7 +3866,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // Check if this can be converted into a logical shift. bool isLeft = false; unsigned ShAmt = 0; - SDOperand ShVal; + SDValue ShVal; bool isShift = isVectorShift(Op, PermMask, DAG, isLeft, ShVal, ShAmt); if (isShift && ShVal.hasOneUse()) { // If the shifted value has multiple uses, it may be cheaper to use @@ -3925,7 +3925,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // V2 is a splat, so the mask may be malformed. That is, it may point // to any V2 element. The instruction selectior won't like this. Get // a corrected mask and commute to form a proper MOVS{S|D}. - SDOperand NewMask = getMOVLMask(NumElems, DAG); + SDValue NewMask = getMOVLMask(NumElems, DAG); if (NewMask.Val != PermMask.Val) Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); } @@ -3942,13 +3942,13 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // Normalize mask so all entries that point to V2 points to its first // element then try to match unpck{h|l} again. If match, return a // new vector_shuffle with the corrected mask. - SDOperand NewMask = NormalizeMask(PermMask, DAG); + SDValue NewMask = NormalizeMask(PermMask, DAG); if (NewMask.Val != PermMask.Val) { if (X86::isUNPCKLMask(PermMask.Val, true)) { - SDOperand NewMask = getUnpacklMask(NumElems, DAG); + SDValue NewMask = getUnpacklMask(NumElems, DAG); return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); } else if (X86::isUNPCKHMask(PermMask.Val, true)) { - SDOperand NewMask = getUnpackhMask(NumElems, DAG); + SDValue NewMask = getUnpackhMask(NumElems, DAG); return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask); } } @@ -4005,7 +4005,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { // Handle v8i16 specifically since SSE can do byte extraction and insertion. 
if (VT == MVT::v8i16) { - SDOperand NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); + SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this); if (NewOp.Val) return NewOp; } @@ -4014,23 +4014,23 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG) { if (NumElems == 4 && !isMMX) return LowerVECTOR_SHUFFLE_4wide(V1, V2, PermMask, VT, DAG); - return SDOperand(); + return SDValue(); } -SDOperand -X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, +SDValue +X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); if (VT.getSizeInBits() == 8) { - SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, + SDValue Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32, Op.getOperand(0), Op.getOperand(1)); - SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, + SDValue Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, VT, Assert); } else if (VT.getSizeInBits() == 16) { - SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, + SDValue Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32, Op.getOperand(0), Op.getOperand(1)); - SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, + SDValue Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, VT, Assert); } else if (VT == MVT::f32) { @@ -4038,28 +4038,28 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, // the result back to FR32 register. It's only worth matching if the // result has a single use which is a store or a bitcast to i32. if (!Op.hasOneUse()) - return SDOperand(); + return SDValue(); SDNode *User = *Op.Val->use_begin(); if (User->getOpcode() != ISD::STORE && (User->getOpcode() != ISD::BIT_CONVERT || User->getValueType(0) != MVT::i32)) - return SDOperand(); - SDOperand Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, + return SDValue(); + SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32, DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Op.getOperand(0)), Op.getOperand(1)); return DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Extract); } - return SDOperand(); + return SDValue(); } -SDOperand -X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { if (!isa<ConstantSDNode>(Op.getOperand(1))) - return SDOperand(); + return SDValue(); if (Subtarget->hasSSE41()) { - SDOperand Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); + SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG); if (Res.Val) return Res; } @@ -4067,7 +4067,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); // TODO: handle v16i8. if (VT.getSizeInBits() == 16) { - SDOperand Vec = Op.getOperand(0); + SDValue Vec = Op.getOperand(0); unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue(); if (Idx == 0) return DAG.getNode(ISD::TRUNCATE, MVT::i16, @@ -4076,9 +4076,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { Op.getOperand(1))); // Transform it so it match pextrw which produces a 32-bit result. 
MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1); - SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT, + SDValue Extract = DAG.getNode(X86ISD::PEXTRW, EVT, Op.getOperand(0), Op.getOperand(1)); - SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, + SDValue Assert = DAG.getNode(ISD::AssertZext, EVT, Extract, DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, VT, Assert); } else if (VT.getSizeInBits() == 32) { @@ -4087,7 +4087,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { return Op; // SHUFPS the element to the lowest double word, then movss. MVT MaskVT = MVT::getIntVectorWithNumElements(4); - SmallVector<SDOperand, 8> IdxVec; + SmallVector<SDValue, 8> IdxVec; IdxVec. push_back(DAG.getConstant(Idx, MaskVT.getVectorElementType())); IdxVec. @@ -4096,9 +4096,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType())); IdxVec. push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType())); - SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &IdxVec[0], IdxVec.size()); - SDOperand Vec = Op.getOperand(0); + SDValue Vec = Op.getOperand(0); Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, @@ -4115,30 +4115,30 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { // Note if the lower 64 bits of the result of the UNPCKHPD is then stored // to a f64mem, the whole operation is folded into a single MOVHPDmr. MVT MaskVT = MVT::getIntVectorWithNumElements(2); - SmallVector<SDOperand, 8> IdxVec; + SmallVector<SDValue, 8> IdxVec; IdxVec.push_back(DAG.getConstant(1, MaskVT.getVectorElementType())); IdxVec. push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType())); - SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, + SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &IdxVec[0], IdxVec.size()); - SDOperand Vec = Op.getOperand(0); + SDValue Vec = Op.getOperand(0); Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(), Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask); return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec, DAG.getIntPtrConstant(0)); } - return SDOperand(); + return SDValue(); } -SDOperand -X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){ +SDValue +X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG){ MVT VT = Op.getValueType(); MVT EVT = VT.getVectorElementType(); - SDOperand N0 = Op.getOperand(0); - SDOperand N1 = Op.getOperand(1); - SDOperand N2 = Op.getOperand(2); + SDValue N0 = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); + SDValue N2 = Op.getOperand(2); if ((EVT.getSizeInBits() == 8) || (EVT.getSizeInBits() == 16)) { unsigned Opc = (EVT.getSizeInBits() == 8) ? 
X86ISD::PINSRB @@ -4162,11 +4162,11 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){ N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue() << 4); return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2); } - return SDOperand(); + return SDValue(); } -SDOperand -X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT EVT = VT.getVectorElementType(); @@ -4174,11 +4174,11 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG); if (EVT == MVT::i8) - return SDOperand(); + return SDValue(); - SDOperand N0 = Op.getOperand(0); - SDOperand N1 = Op.getOperand(1); - SDOperand N2 = Op.getOperand(2); + SDValue N0 = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); + SDValue N2 = Op.getOperand(2); if (EVT.getSizeInBits() == 16) { // Transform it so it match pinsrw which expects a 16-bit value in a GR32 @@ -4189,18 +4189,18 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) { N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getValue()); return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2); } - return SDOperand(); + return SDValue(); } -SDOperand -X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { if (Op.getValueType() == MVT::v2f32) return DAG.getNode(ISD::BIT_CONVERT, MVT::v2f32, DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i32, DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op.getOperand(0)))); - SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); + SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0)); MVT VT = MVT::v2i32; switch (Op.getValueType().getSimpleVT()) { default: break; @@ -4219,10 +4219,10 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) { // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only // be used to form addressing mode. These wrapped nodes will be selected // into MOV32ri. -SDOperand -X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) { ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); - SDOperand Result = DAG.getTargetConstantPool(CP->getConstVal(), + SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), CP->getAlignment()); Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); @@ -4237,10 +4237,10 @@ X86TargetLowering::LowerConstantPool(SDOperand Op, SelectionDAG &DAG) { return Result; } -SDOperand -X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) { GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); - SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); + SDValue Result = DAG.getTargetGlobalAddress(GV, getPointerTy()); Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. 
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && @@ -4263,22 +4263,22 @@ X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) { } // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit -static SDOperand +static SDValue LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, const MVT PtrVT) { - SDOperand InFlag; - SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, + SDValue InFlag; + SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX, DAG.getNode(X86ISD::GlobalBaseReg, PtrVT), InFlag); InFlag = Chain.getValue(1); // emit leal symbol@TLSGD(,%ebx,1), %eax SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); - SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), + SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0), GA->getOffset()); - SDOperand Ops[] = { Chain, TGA, InFlag }; - SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); + SDValue Ops[] = { Chain, TGA, InFlag }; + SDValue Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3); InFlag = Result.getValue(2); Chain = Result.getValue(1); @@ -4288,7 +4288,7 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, InFlag = Chain.getValue(1); NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); - SDOperand Ops1[] = { Chain, + SDValue Ops1[] = { Chain, DAG.getTargetExternalSymbol("___tls_get_addr", PtrVT), DAG.getRegister(X86::EAX, PtrVT), @@ -4301,18 +4301,18 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, } // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit -static SDOperand +static SDValue LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, const MVT PtrVT) { - SDOperand InFlag, Chain; + SDValue InFlag, Chain; // emit leaq symbol@TLSGD(%rip), %rdi SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag); - SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), + SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0), GA->getOffset()); - SDOperand Ops[] = { DAG.getEntryNode(), TGA}; - SDOperand Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 2); + SDValue Ops[] = { DAG.getEntryNode(), TGA}; + SDValue Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 2); Chain = Result.getValue(1); InFlag = Result.getValue(2); @@ -4322,7 +4322,7 @@ LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, InFlag = Chain.getValue(1); NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); - SDOperand Ops1[] = { Chain, + SDValue Ops1[] = { Chain, DAG.getTargetExternalSymbol("___tls_get_addr", PtrVT), DAG.getRegister(X86::RDI, PtrVT), @@ -4335,16 +4335,16 @@ LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, // Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or // "local exec" model. 
-static SDOperand LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, +static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, const MVT PtrVT) { // Get the Thread Pointer - SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); + SDValue ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT); // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial // exec) - SDOperand TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), + SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0), GA->getOffset()); - SDOperand Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); + SDValue Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA); if (GA->getGlobal()->isDeclaration()) // initial exec TLS model Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset, @@ -4355,8 +4355,8 @@ static SDOperand LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset); } -SDOperand -X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) { // TODO: implement the "local dynamic" model // TODO: implement the "initial exec"model for pic executables assert(Subtarget->isTargetELF() && @@ -4374,10 +4374,10 @@ X86TargetLowering::LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG) { } } -SDOperand -X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) { const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); - SDOperand Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); + SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy()); Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && @@ -4390,9 +4390,9 @@ X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) { return Result; } -SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) { JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); - SDOperand Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); + SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()); Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result); // With PIC, the address is actually $g + Offset. if (getTargetMachine().getRelocationModel() == Reloc::PIC_ && @@ -4407,19 +4407,19 @@ SDOperand X86TargetLowering::LowerJumpTable(SDOperand Op, SelectionDAG &DAG) { /// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and /// take a 2 x i32 value to shift plus a shift amount. -SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) { assert(Op.getNumOperands() == 3 && "Not a double-shift!"); MVT VT = Op.getValueType(); unsigned VTBits = VT.getSizeInBits(); bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; - SDOperand ShOpLo = Op.getOperand(0); - SDOperand ShOpHi = Op.getOperand(1); - SDOperand ShAmt = Op.getOperand(2); - SDOperand Tmp1 = isSRA ? + SDValue ShOpLo = Op.getOperand(0); + SDValue ShOpHi = Op.getOperand(1); + SDValue ShAmt = Op.getOperand(2); + SDValue Tmp1 = isSRA ? 
DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) : DAG.getConstant(0, VT); - SDOperand Tmp2, Tmp3; + SDValue Tmp2, Tmp3; if (Op.getOpcode() == ISD::SHL_PARTS) { Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt); Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt); @@ -4428,15 +4428,15 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt); } - SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, + SDValue AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt, DAG.getConstant(VTBits, MVT::i8)); - SDOperand Cond = DAG.getNode(X86ISD::CMP, VT, + SDValue Cond = DAG.getNode(X86ISD::CMP, VT, AndNode, DAG.getConstant(0, MVT::i8)); - SDOperand Hi, Lo; - SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8); - SDOperand Ops0[4] = { Tmp2, Tmp3, CC, Cond }; - SDOperand Ops1[4] = { Tmp3, Tmp1, CC, Cond }; + SDValue Hi, Lo; + SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8); + SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond }; + SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond }; if (Op.getOpcode() == ISD::SHL_PARTS) { Hi = DAG.getNode(X86ISD::CMOV, VT, Ops0, 4); @@ -4446,27 +4446,27 @@ SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) { Hi = DAG.getNode(X86ISD::CMOV, VT, Ops1, 4); } - SDOperand Ops[2] = { Lo, Hi }; + SDValue Ops[2] = { Lo, Hi }; return DAG.getMergeValues(Ops, 2); } -SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) { MVT SrcVT = Op.getOperand(0).getValueType(); assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 && "Unknown SINT_TO_FP to lower!"); // These are really Legal; caller falls through into that case. if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType())) - return SDOperand(); + return SDValue(); if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 && Subtarget->is64Bit()) - return SDOperand(); + return SDValue(); unsigned Size = SrcVT.getSizeInBits()/8; MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size); - SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); - SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), + SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); + SDValue Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0), StackSlot, PseudoSourceValue::getFixedStack(SSFI), 0); @@ -4477,25 +4477,25 @@ SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag); else Tys = DAG.getVTList(Op.getValueType(), MVT::Other); - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.push_back(Chain); Ops.push_back(StackSlot); Ops.push_back(DAG.getValueType(SrcVT)); - SDOperand Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, + SDValue Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, Tys, &Ops[0], Ops.size()); if (useSSE) { Chain = Result.getValue(1); - SDOperand InFlag = Result.getValue(2); + SDValue InFlag = Result.getValue(2); // FIXME: Currently the FST is flagged to the FILD_FLAG. This // shouldn't be necessary except that RFP cannot be live across // multiple blocks. When stackifier is fixed, they can be uncoupled. 
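The SRA_PARTS/SHL_PARTS lowering in LowerShift above reduces to familiar scalar arithmetic. Below is a minimal model in plain C++ of the SHL_PARTS case only (the function name and the fixed 32-bit part width are assumptions for illustration); the real code selects between the SHLD/SHL pair and the swapped pair with X86ISD::CMOV on the "shift amount has bit 5 set" test.

#include <cstdint>
#include <cstdio>

// shl_parts - Shift a 64-bit value held as two 32-bit parts left by Amt:
// hardware shifts use Amt mod 32, and bit 5 of Amt decides whether the
// halves are swapped.
static void shl_parts(uint32_t Lo, uint32_t Hi, unsigned Amt,
                      uint32_t &OutLo, uint32_t &OutHi) {
  unsigned s = Amt & 31;
  uint32_t HiShifted = s ? (Hi << s) | (Lo >> (32 - s)) : Hi;  // shld hi, lo, s
  uint32_t LoShifted = Lo << s;                                // shl  lo, s
  if (Amt & 32) {          // amount >= 32: low half moves into the high half
    OutHi = LoShifted;
    OutLo = 0;
  } else {
    OutHi = HiShifted;
    OutLo = LoShifted;
  }
}

int main() {
  uint32_t Lo, Hi;
  shl_parts(0x89ABCDEFu, 0x01234567u, 36, Lo, Hi);
  std::printf("%08x:%08x\n", (unsigned)Hi, (unsigned)Lo);  // 9abcdef0:00000000
}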
MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8); - SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); + SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); Tys = DAG.getVTList(MVT::Other); - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.push_back(Chain); Ops.push_back(Result); Ops.push_back(StackSlot); @@ -4509,8 +4509,8 @@ SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) { return Result; } -std::pair<SDOperand,SDOperand> X86TargetLowering:: -FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { +std::pair<SDValue,SDValue> X86TargetLowering:: +FP_TO_SINTHelper(SDValue Op, SelectionDAG &DAG) { assert(Op.getValueType().getSimpleVT() <= MVT::i64 && Op.getValueType().getSimpleVT() >= MVT::i16 && "Unknown FP_TO_SINT to lower!"); @@ -4518,18 +4518,18 @@ FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { // These are really Legal. if (Op.getValueType() == MVT::i32 && isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) - return std::make_pair(SDOperand(), SDOperand()); + return std::make_pair(SDValue(), SDValue()); if (Subtarget->is64Bit() && Op.getValueType() == MVT::i64 && Op.getOperand(0).getValueType() != MVT::f80) - return std::make_pair(SDOperand(), SDOperand()); + return std::make_pair(SDValue(), SDValue()); // We lower FP->sint64 into FISTP64, followed by a load, all to a temporary // stack slot. MachineFunction &MF = DAG.getMachineFunction(); unsigned MemSize = Op.getValueType().getSizeInBits()/8; int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize); - SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); + SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); unsigned Opc; switch (Op.getValueType().getSimpleVT()) { default: assert(0 && "Invalid FP_TO_SINT to lower!"); @@ -4538,14 +4538,14 @@ FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break; } - SDOperand Chain = DAG.getEntryNode(); - SDOperand Value = Op.getOperand(0); + SDValue Chain = DAG.getEntryNode(); + SDValue Value = Op.getOperand(0); if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) { assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!"); Chain = DAG.getStore(Chain, Value, StackSlot, PseudoSourceValue::getFixedStack(SSFI), 0); SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other); - SDOperand Ops[] = { + SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType()) }; Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3); @@ -4555,30 +4555,30 @@ FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) { } // Build the FP_TO_INT*_IN_MEM - SDOperand Ops[] = { Chain, Value, StackSlot }; - SDOperand FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); + SDValue Ops[] = { Chain, Value, StackSlot }; + SDValue FIST = DAG.getNode(Opc, MVT::Other, Ops, 3); return std::make_pair(FIST, StackSlot); } -SDOperand X86TargetLowering::LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) { - std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(Op, DAG); - SDOperand FIST = Vals.first, StackSlot = Vals.second; - if (FIST.Val == 0) return SDOperand(); +SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { + std::pair<SDValue,SDValue> Vals = FP_TO_SINTHelper(Op, DAG); + SDValue FIST = Vals.first, StackSlot = Vals.second; + if (FIST.Val == 0) return SDValue(); // Load the result. 
return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0); } SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { - std::pair<SDOperand,SDOperand> Vals = FP_TO_SINTHelper(SDOperand(N, 0), DAG); - SDOperand FIST = Vals.first, StackSlot = Vals.second; + std::pair<SDValue,SDValue> Vals = FP_TO_SINTHelper(SDValue(N, 0), DAG); + SDValue FIST = Vals.first, StackSlot = Vals.second; if (FIST.Val == 0) return 0; MVT VT = N->getValueType(0); // Return a load from the stack slot. - SDOperand Res = DAG.getLoad(VT, FIST, StackSlot, NULL, 0); + SDValue Res = DAG.getLoad(VT, FIST, StackSlot, NULL, 0); // Use MERGE_VALUES to drop the chain result value and get a node with one // result. This requires turning off getMergeValues simplification, since @@ -4586,7 +4586,7 @@ SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) { return DAG.getMergeValues(&Res, 1, false).Val; } -SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT EltVT = VT; if (VT.isVector()) @@ -4604,14 +4604,14 @@ SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) { CV.push_back(C); } Constant *C = ConstantVector::get(CV); - SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); - SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, + SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); + SDValue Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0, false, 16); return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask); } -SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT EltVT = VT; unsigned EltNum = 1; @@ -4632,8 +4632,8 @@ SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { CV.push_back(C); } Constant *C = ConstantVector::get(CV); - SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); - SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, + SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); + SDValue Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0, false, 16); if (VT.isVector()) { @@ -4646,9 +4646,9 @@ SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) { } } -SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { - SDOperand Op0 = Op.getOperand(0); - SDOperand Op1 = Op.getOperand(1); +SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) { + SDValue Op0 = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); MVT VT = Op.getValueType(); MVT SrcVT = Op1.getValueType(); @@ -4678,11 +4678,11 @@ SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { CV.push_back(ConstantFP::get(APFloat(APInt(32, 0)))); } Constant *C = ConstantVector::get(CV); - SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); - SDOperand Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, + SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); + SDValue Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0, false, 16); - SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); + SDValue SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1); // Shift sign bit right or left if the two operands have different types. 
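LowerFABS, LowerFNEG, and the LowerFCOPYSIGN case that continues just below all come down to sign-bit masking: the constant-pool vectors they load are broadcast masks fed to X86ISD::FAND / FXOR / FOR. A scalar sketch in plain C++, assuming IEEE-754 doubles (helper names are illustrative):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Bit-level views of a double; memcpy is the portable way to get at them.
static uint64_t bits(double d)       { uint64_t u; std::memcpy(&u, &d, 8); return u; }
static double   fromBits(uint64_t u) { double d;   std::memcpy(&d, &u, 8); return d; }

static double fabsMask(double x) { return fromBits(bits(x) & ~(1ULL << 63)); }  // FAND
static double fnegMask(double x) { return fromBits(bits(x) ^  (1ULL << 63)); }  // FXOR
static double copysignMask(double Mag, double Sgn) {                            // FAND + FOR
  return fromBits((bits(Mag) & ~(1ULL << 63)) | (bits(Sgn) & (1ULL << 63)));
}

int main() {
  std::printf("%g %g %g\n", fabsMask(-2.5), fnegMask(2.5), copysignMask(3.0, -1.0));
  // prints: 2.5 -2.5 -3
}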
if (SrcVT.bitsGT(VT)) { @@ -4708,21 +4708,21 @@ SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) { } C = ConstantVector::get(CV); CPIdx = DAG.getConstantPool(C, getPointerTy(), 4); - SDOperand Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, + SDValue Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx, PseudoSourceValue::getConstantPool(), 0, false, 16); - SDOperand Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); + SDValue Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2); // Or the value with the sign bit. return DAG.getNode(X86ISD::FOR, VT, Val, SignBit); } -SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) { assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer"); - SDOperand Cond; - SDOperand Op0 = Op.getOperand(0); - SDOperand Op1 = Op.getOperand(1); - SDOperand CC = Op.getOperand(2); + SDValue Cond; + SDValue Op0 = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); + SDValue CC = Op.getOperand(2); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); unsigned X86CC; @@ -4740,27 +4740,27 @@ SDOperand X86TargetLowering::LowerSETCC(SDOperand Op, SelectionDAG &DAG) { switch (SetCCOpcode) { default: assert(false && "Illegal floating point SetCC!"); case ISD::SETOEQ: { // !PF & ZF - SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, + SDValue Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_NP, MVT::i8), Cond); - SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, + SDValue Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_E, MVT::i8), Cond); return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2); } case ISD::SETUNE: { // PF | !ZF - SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, + SDValue Tmp1 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_P, MVT::i8), Cond); - SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, + SDValue Tmp2 = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86::COND_NE, MVT::i8), Cond); return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2); } } } -SDOperand X86TargetLowering::LowerVSETCC(SDOperand Op, SelectionDAG &DAG) { - SDOperand Cond; - SDOperand Op0 = Op.getOperand(0); - SDOperand Op1 = Op.getOperand(1); - SDOperand CC = Op.getOperand(2); +SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) { + SDValue Cond; + SDValue Op0 = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); + SDValue CC = Op.getOperand(2); MVT VT = Op.getValueType(); ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get(); bool isFP = Op.getOperand(1).getValueType().isFloatingPoint(); @@ -4798,13 +4798,13 @@ SDOperand X86TargetLowering::LowerVSETCC(SDOperand Op, SelectionDAG &DAG) { // In the two special cases we can't handle, emit two comparisons. 
if (SSECC == 8) { if (SetCCOpcode == ISD::SETUEQ) { - SDOperand UNORD, EQ; + SDValue UNORD, EQ; UNORD = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(3, MVT::i8)); EQ = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(0, MVT::i8)); return DAG.getNode(ISD::OR, VT, UNORD, EQ); } else if (SetCCOpcode == ISD::SETONE) { - SDOperand ORD, NEQ; + SDValue ORD, NEQ; ORD = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(7, MVT::i8)); NEQ = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(4, MVT::i8)); return DAG.getNode(ISD::AND, VT, ORD, NEQ); @@ -4849,32 +4849,32 @@ SDOperand X86TargetLowering::LowerVSETCC(SDOperand Op, SelectionDAG &DAG) { // bits of the inputs before performing those operations. if (FlipSigns) { MVT EltVT = VT.getVectorElementType(); - SDOperand SignBit = DAG.getConstant(EltVT.getIntegerVTSignBit(), EltVT); - std::vector<SDOperand> SignBits(VT.getVectorNumElements(), SignBit); - SDOperand SignVec = DAG.getNode(ISD::BUILD_VECTOR, VT, &SignBits[0], + SDValue SignBit = DAG.getConstant(EltVT.getIntegerVTSignBit(), EltVT); + std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit); + SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, VT, &SignBits[0], SignBits.size()); Op0 = DAG.getNode(ISD::XOR, VT, Op0, SignVec); Op1 = DAG.getNode(ISD::XOR, VT, Op1, SignVec); } - SDOperand Result = DAG.getNode(Opc, VT, Op0, Op1); + SDValue Result = DAG.getNode(Opc, VT, Op0, Op1); // If the logical-not of the result is required, perform that now. if (Invert) { MVT EltVT = VT.getVectorElementType(); - SDOperand NegOne = DAG.getConstant(EltVT.getIntegerVTBitMask(), EltVT); - std::vector<SDOperand> NegOnes(VT.getVectorNumElements(), NegOne); - SDOperand NegOneV = DAG.getNode(ISD::BUILD_VECTOR, VT, &NegOnes[0], + SDValue NegOne = DAG.getConstant(EltVT.getIntegerVTBitMask(), EltVT); + std::vector<SDValue> NegOnes(VT.getVectorNumElements(), NegOne); + SDValue NegOneV = DAG.getNode(ISD::BUILD_VECTOR, VT, &NegOnes[0], NegOnes.size()); Result = DAG.getNode(ISD::XOR, VT, Result, NegOneV); } return Result; } -SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) { bool addTest = true; - SDOperand Cond = Op.getOperand(0); - SDOperand CC; + SDValue Cond = Op.getOperand(0); + SDValue CC; if (Cond.getOpcode() == ISD::SETCC) Cond = LowerSETCC(Cond, DAG); @@ -4884,7 +4884,7 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { if (Cond.getOpcode() == X86ISD::SETCC) { CC = Cond.getOperand(0); - SDOperand Cmp = Cond.getOperand(1); + SDValue Cmp = Cond.getOperand(1); unsigned Opc = Cmp.getOpcode(); MVT VT = Op.getValueType(); @@ -4908,7 +4908,7 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag); - SmallVector<SDOperand, 4> Ops; + SmallVector<SDValue, 4> Ops; // X86ISD::CMOV means set the result (which is operand 1) to the RHS if // condition is true. 
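The FlipSigns and Invert steps in LowerVSETCC above rest on two identities: an unsigned compare equals a signed compare once the sign bit has been XORed into both operands, and a negated predicate is the all-ones complement of the positive one. A per-lane scalar sketch in plain C++ (function names are illustrative, each modelling one lane of the packed compares):

#include <cstdint>
#include <cstdio>

// One lane of pcmpgtd: SSE2 only has the signed form.
static uint32_t cmpgtSigned(uint32_t a, uint32_t b) {
  return (int32_t)a > (int32_t)b ? 0xFFFFFFFFu : 0u;
}

// FlipSigns: unsigned greater-than is signed greater-than after XORing the
// sign bit into both operands.
static uint32_t cmpgtUnsigned(uint32_t a, uint32_t b) {
  const uint32_t SignBit = 0x80000000u;
  return cmpgtSigned(a ^ SignBit, b ^ SignBit);
}

// Invert: "not equal" is the all-ones complement of the equality result.
static uint32_t cmpne(uint32_t a, uint32_t b) {
  uint32_t eq = (a == b) ? 0xFFFFFFFFu : 0u;
  return eq ^ 0xFFFFFFFFu;
}

int main() {
  std::printf("%08x %08x %08x\n",
              (unsigned)cmpgtSigned(1u, 0xFFFFFFFFu),    // ffffffff (1 > -1 signed)
              (unsigned)cmpgtUnsigned(1u, 0xFFFFFFFFu),  // 00000000 (1 < max unsigned)
              (unsigned)cmpne(5u, 5u));                  // 00000000
}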
Ops.push_back(Op.getOperand(2)); @@ -4918,12 +4918,12 @@ SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) { return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size()); } -SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) { bool addTest = true; - SDOperand Chain = Op.getOperand(0); - SDOperand Cond = Op.getOperand(1); - SDOperand Dest = Op.getOperand(2); - SDOperand CC; + SDValue Chain = Op.getOperand(0); + SDValue Cond = Op.getOperand(1); + SDValue Dest = Op.getOperand(2); + SDValue CC; if (Cond.getOpcode() == ISD::SETCC) Cond = LowerSETCC(Cond, DAG); @@ -4933,7 +4933,7 @@ SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { if (Cond.getOpcode() == X86ISD::SETCC) { CC = Cond.getOperand(0); - SDOperand Cmp = Cond.getOperand(1); + SDValue Cmp = Cond.getOperand(1); unsigned Opc = Cmp.getOpcode(); if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || @@ -4957,18 +4957,18 @@ SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) { // bytes in one go. Touching the stack at 4K increments is necessary to ensure // that the guard pages used by the OS virtual memory manager are allocated in // correct sequence. -SDOperand -X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, +SDValue +X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) { assert(Subtarget->isTargetCygMing() && "This should be used only on Cygwin/Mingw targets"); // Get the inputs. - SDOperand Chain = Op.getOperand(0); - SDOperand Size = Op.getOperand(1); + SDValue Chain = Op.getOperand(0); + SDValue Size = Op.getOperand(1); // FIXME: Ensure alignment here - SDOperand Flag; + SDValue Flag; MVT IntPtr = getPointerTy(); MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32; @@ -4979,7 +4979,7 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, Flag = Chain.getValue(1); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); - SDOperand Ops[] = { Chain, + SDValue Ops[] = { Chain, DAG.getTargetExternalSymbol("_alloca", IntPtr), DAG.getRegister(X86::EAX, IntPtr), DAG.getRegister(X86StackPtr, SPTy), @@ -4994,15 +4994,15 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDOperand Op, Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1); - SDOperand Ops1[2] = { Chain.getValue(0), Chain }; + SDValue Ops1[2] = { Chain.getValue(0), Chain }; return DAG.getMergeValues(Ops1, 2); } -SDOperand +SDValue X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, - SDOperand Chain, - SDOperand Dst, SDOperand Src, - SDOperand Size, unsigned Align, + SDValue Chain, + SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, const Value *DstSV, uint64_t DstSVOff) { ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); @@ -5012,7 +5012,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, if ((Align & 3) == 0 || !ConstantSize || ConstantSize->getValue() > getSubtarget()->getMaxInlineSizeThreshold()) { - SDOperand InFlag(0, 0); + SDValue InFlag(0, 0); // Check to see if there is a specialized entry-point for memory zeroing. 
ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src); @@ -5027,7 +5027,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry); - std::pair<SDOperand,SDOperand> CallResult = + std::pair<SDValue,SDValue> CallResult = LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C, false, DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG); @@ -5035,13 +5035,13 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, } // Otherwise have the target-independent code call memset. - return SDOperand(); + return SDValue(); } uint64_t SizeVal = ConstantSize->getValue(); - SDOperand InFlag(0, 0); + SDValue InFlag(0, 0); MVT AVT; - SDOperand Count; + SDValue Count; ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src); unsigned BytesLeft = 0; bool TwoRepStos = false; @@ -5098,7 +5098,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.push_back(Chain); Ops.push_back(DAG.getValueType(AVT)); Ops.push_back(InFlag); @@ -5108,7 +5108,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, InFlag = Chain.getValue(1); Count = Size; MVT CVT = Count.getValueType(); - SDOperand Left = DAG.getNode(ISD::AND, CVT, Count, + SDValue Left = DAG.getNode(ISD::AND, CVT, Count, DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT)); Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX, Left, InFlag); @@ -5137,11 +5137,11 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, return Chain; } -SDOperand +SDValue X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, - SDOperand Chain, - SDOperand Dst, SDOperand Src, - SDOperand Size, unsigned Align, + SDValue Chain, + SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, bool AlwaysInline, const Value *DstSV, uint64_t DstSVOff, const Value *SrcSV, uint64_t SrcSVOff){ @@ -5150,10 +5150,10 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, // within a subtarget-specific limit. ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); if (!ConstantSize) - return SDOperand(); + return SDValue(); uint64_t SizeVal = ConstantSize->getValue(); if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold()) - return SDOperand(); + return SDValue(); MVT AVT; unsigned BytesLeft = 0; @@ -5168,10 +5168,10 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, unsigned UBytes = AVT.getSizeInBits() / 8; unsigned CountVal = SizeVal / UBytes; - SDOperand Count = DAG.getIntPtrConstant(CountVal); + SDValue Count = DAG.getIntPtrConstant(CountVal); BytesLeft = SizeVal % UBytes; - SDOperand InFlag(0, 0); + SDValue InFlag(0, 0); Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX, Count, InFlag); InFlag = Chain.getValue(1); @@ -5183,13 +5183,13 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, InFlag = Chain.getValue(1); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; Ops.push_back(Chain); Ops.push_back(DAG.getValueType(AVT)); Ops.push_back(InFlag); - SDOperand RepMovs = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); + SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size()); - SmallVector<SDOperand, 4> Results; + SmallVector<SDValue, 4> Results; Results.push_back(RepMovs); if (BytesLeft) { // Handle the last 1 - 7 bytes. 
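For reference, the count arithmetic in EmitTargetCodeForMemcpy above is plain integer division: the copy is issued as Count repetitions of the chosen element width, with BytesLeft trailing bytes handled separately afterwards. A tiny standalone C++ illustration, assuming a hypothetical 1003-byte copy and an 8-byte element width:

#include <cstdio>

int main() {
  const unsigned long long SizeVal = 1003;          // assumed constant copy size
  const unsigned UBytes = 8;                        // element width used by rep movs
  unsigned long long Count     = SizeVal / UBytes;  // loaded into RCX/ECX
  unsigned long long BytesLeft = SizeVal % UBytes;  // copied after REP_MOVS
  std::printf("rep movs count = %llu, trailing bytes = %llu\n", Count, BytesLeft);
  // rep movs count = 125, trailing bytes = 3
}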
@@ -5214,26 +5214,26 @@ X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG, /// Expand the result of: i64,outchain = READCYCLECOUNTER inchain SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SDOperand TheChain = N->getOperand(0); - SDOperand rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); + SDValue TheChain = N->getOperand(0); + SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1); if (Subtarget->is64Bit()) { - SDOperand rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); - SDOperand rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, + SDValue rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1)); + SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX, MVT::i64, rax.getValue(2)); - SDOperand Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, + SDValue Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx, DAG.getConstant(32, MVT::i8)); - SDOperand Ops[] = { + SDValue Ops[] = { DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1) }; return DAG.getMergeValues(Ops, 2).Val; } - SDOperand eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); - SDOperand edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, + SDValue eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1)); + SDValue edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX, MVT::i32, eax.getValue(2)); // Use a buildpair to merge the two 32-bit values into a 64-bit one. - SDOperand Ops[] = { eax, edx }; + SDValue Ops[] = { eax, edx }; Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2); // Use a MERGE_VALUES to return the value and chain. @@ -5241,13 +5241,13 @@ SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG){ return DAG.getMergeValues(Ops, 2).Val; } -SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) { const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); if (!Subtarget->is64Bit()) { // vastart just stores the address of the VarArgsFrameIndex slot into the // memory location argument. - SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); + SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); return DAG.getStore(Op.getOperand(0), FR,Op.getOperand(1), SV, 0); } @@ -5256,10 +5256,10 @@ SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { // fp_offset (48 - 48 + 8 * 16) // overflow_arg_area (point to parameters coming in memory). // reg_save_area - SmallVector<SDOperand, 8> MemOps; - SDOperand FIN = Op.getOperand(1); + SmallVector<SDValue, 8> MemOps; + SDValue FIN = Op.getOperand(1); // Store gp_offset - SDOperand Store = DAG.getStore(Op.getOperand(0), + SDValue Store = DAG.getStore(Op.getOperand(0), DAG.getConstant(VarArgsGPOffset, MVT::i32), FIN, SV, 0); MemOps.push_back(Store); @@ -5273,36 +5273,36 @@ SDOperand X86TargetLowering::LowerVASTART(SDOperand Op, SelectionDAG &DAG) { // Store ptr to overflow_arg_area FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4)); - SDOperand OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); + SDValue OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy()); Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0); MemOps.push_back(Store); // Store ptr to reg_save_area. 
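The va_list that LowerVASTART fills in above is the four-field structure its comments describe, and the increments of 4 and 8 between the stores walk through that layout. Written as a plain C++ struct for reference (field names follow the SysV x86-64 ABI description, not LLVM identifiers):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// The four fields LowerVASTART initializes, in declaration order. gp_offset
// runs 0..48 (6 GP registers) and fp_offset 48..176 (8 XMM registers), as the
// comments above note; the two pointers locate stack arguments and the
// register save area.
struct X86_64VAList {
  uint32_t gp_offset;
  uint32_t fp_offset;
  void    *overflow_arg_area;
  void    *reg_save_area;
};

int main() {
  std::printf("%zu %zu %zu %zu (size %zu)\n",
              offsetof(X86_64VAList, gp_offset),
              offsetof(X86_64VAList, fp_offset),
              offsetof(X86_64VAList, overflow_arg_area),
              offsetof(X86_64VAList, reg_save_area),
              sizeof(X86_64VAList));   // 0 4 8 16 (size 24)
}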
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8)); - SDOperand RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); + SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy()); Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0); MemOps.push_back(Store); return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size()); } -SDOperand X86TargetLowering::LowerVAARG(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) { // X86-64 va_list is a struct { i32, i32, i8*, i8* }. assert(Subtarget->is64Bit() && "This code only handles 64-bit va_arg!"); - SDOperand Chain = Op.getOperand(0); - SDOperand SrcPtr = Op.getOperand(1); - SDOperand SrcSV = Op.getOperand(2); + SDValue Chain = Op.getOperand(0); + SDValue SrcPtr = Op.getOperand(1); + SDValue SrcSV = Op.getOperand(2); assert(0 && "VAArgInst is not yet implemented for x86-64!"); abort(); - return SDOperand(); + return SDValue(); } -SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) { // X86-64 va_list is a struct { i32, i32, i8*, i8* }. assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); - SDOperand Chain = Op.getOperand(0); - SDOperand DstPtr = Op.getOperand(1); - SDOperand SrcPtr = Op.getOperand(2); + SDValue Chain = Op.getOperand(0); + SDValue DstPtr = Op.getOperand(1); + SDValue SrcPtr = Op.getOperand(2); const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); @@ -5311,11 +5311,11 @@ SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) { DstSV, 0, SrcSV, 0); } -SDOperand -X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { +SDValue +X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getValue(); switch (IntNo) { - default: return SDOperand(); // Don't custom lower most intrinsics. + default: return SDValue(); // Don't custom lower most intrinsics. // Comparison intrinsics. 
case Intrinsic::x86_sse_comieq_ss: case Intrinsic::x86_sse_comilt_ss: @@ -5408,12 +5408,12 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { } unsigned X86CC; - SDOperand LHS = Op.getOperand(1); - SDOperand RHS = Op.getOperand(2); + SDValue LHS = Op.getOperand(1); + SDValue RHS = Op.getOperand(2); translateX86CC(CC, true, X86CC, LHS, RHS, DAG); - SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); - SDOperand SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, + SDValue Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS); + SDValue SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8, DAG.getConstant(X86CC, MVT::i8), Cond); return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC); } @@ -5436,9 +5436,9 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { case Intrinsic::x86_mmx_psrli_q: case Intrinsic::x86_mmx_psrai_w: case Intrinsic::x86_mmx_psrai_d: { - SDOperand ShAmt = Op.getOperand(2); + SDValue ShAmt = Op.getOperand(2); if (isa<ConstantSDNode>(ShAmt)) - return SDOperand(); + return SDValue(); unsigned NewIntNo = 0; MVT ShAmtVT = MVT::v4i32; @@ -5509,49 +5509,49 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG) { } } -SDOperand X86TargetLowering::LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) { // Depths > 0 not supported yet! if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) - return SDOperand(); + return SDValue(); // Just load the return address - SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); + SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0); } -SDOperand X86TargetLowering::LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) { // Depths > 0 not supported yet! if (cast<ConstantSDNode>(Op.getOperand(0))->getValue() > 0) - return SDOperand(); + return SDValue(); - SDOperand RetAddrFI = getReturnAddressFrameIndex(DAG); + SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); return DAG.getNode(ISD::SUB, getPointerTy(), RetAddrFI, DAG.getIntPtrConstant(!Subtarget->is64Bit() ? 
4 : 8)); } -SDOperand X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, +SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) { // Is not yet supported on x86-64 if (Subtarget->is64Bit()) - return SDOperand(); + return SDValue(); return DAG.getIntPtrConstant(8); } -SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) +SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) { assert(!Subtarget->is64Bit() && "Lowering of eh_return builtin is not supported yet on x86-64"); MachineFunction &MF = DAG.getMachineFunction(); - SDOperand Chain = Op.getOperand(0); - SDOperand Offset = Op.getOperand(1); - SDOperand Handler = Op.getOperand(2); + SDValue Chain = Op.getOperand(0); + SDValue Offset = Op.getOperand(1); + SDValue Handler = Op.getOperand(2); - SDOperand Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), + SDValue Frame = DAG.getRegister(RegInfo->getFrameRegister(MF), getPointerTy()); - SDOperand StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, + SDValue StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame, DAG.getIntPtrConstant(-4UL)); StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset); Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0); @@ -5562,12 +5562,12 @@ SDOperand X86TargetLowering::LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG) Chain, DAG.getRegister(X86::ECX, getPointerTy())); } -SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, +SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) { - SDOperand Root = Op.getOperand(0); - SDOperand Trmp = Op.getOperand(1); // trampoline - SDOperand FPtr = Op.getOperand(2); // nested function - SDOperand Nest = Op.getOperand(3); // 'nest' parameter value + SDValue Root = Op.getOperand(0); + SDValue Trmp = Op.getOperand(1); // trampoline + SDValue FPtr = Op.getOperand(2); // nested function + SDValue Nest = Op.getOperand(3); // 'nest' parameter value const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); @@ -5575,7 +5575,7 @@ SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); if (Subtarget->is64Bit()) { - SDOperand OutChains[6]; + SDValue OutChains[6]; // Large code-model. @@ -5589,7 +5589,7 @@ SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, // Load the pointer to the nested function into R11. 
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 - SDOperand Addr = Trmp; + SDValue Addr = Trmp; OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr, TrmpAddr, 0); @@ -5617,7 +5617,7 @@ SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr, TrmpAddr, 22); - SDOperand Ops[] = + SDValue Ops[] = { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) }; return DAG.getMergeValues(Ops, 2); } else { @@ -5663,8 +5663,8 @@ SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, break; } - SDOperand OutChains[4]; - SDOperand Addr, Disp; + SDValue OutChains[4]; + SDValue Addr, Disp; Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32)); Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr); @@ -5685,13 +5685,13 @@ SDOperand X86TargetLowering::LowerTRAMPOLINE(SDOperand Op, Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32)); OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1); - SDOperand Ops[] = + SDValue Ops[] = { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) }; return DAG.getMergeValues(Ops, 2); } } -SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) { /* The rounding mode is in bits 11:10 of FPSR, and has the following settings: @@ -5719,27 +5719,27 @@ SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { // Save FP Control Word to stack slot int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment); - SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); + SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy()); - SDOperand Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, + SDValue Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other, DAG.getEntryNode(), StackSlot); // Load FP Control Word from stack slot - SDOperand CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); + SDValue CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0); // Transform as necessary - SDOperand CWD1 = + SDValue CWD1 = DAG.getNode(ISD::SRL, MVT::i16, DAG.getNode(ISD::AND, MVT::i16, CWD, DAG.getConstant(0x800, MVT::i16)), DAG.getConstant(11, MVT::i8)); - SDOperand CWD2 = + SDValue CWD2 = DAG.getNode(ISD::SRL, MVT::i16, DAG.getNode(ISD::AND, MVT::i16, CWD, DAG.getConstant(0x400, MVT::i16)), DAG.getConstant(9, MVT::i8)); - SDOperand RetVal = + SDValue RetVal = DAG.getNode(ISD::AND, MVT::i16, DAG.getNode(ISD::ADD, MVT::i16, DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2), @@ -5751,7 +5751,7 @@ SDOperand X86TargetLowering::LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG) { ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal); } -SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); @@ -5768,7 +5768,7 @@ SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { Op = DAG.getNode(X86ISD::BSR, VTs, Op); // If src is zero (i.e. bsr sets ZF), returns NumBits. 
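The CTLZ/CTTZ lowering that begins here maps onto BSR/BSF plus a CMOV for the zero-input case; the 2*NumBits-1 constant it substitutes appears just below. A scalar model in plain C++ of the CTLZ case (helper names are illustrative; the closing XOR with NumBits-1 is what turns a highest-set-bit index into a leading-zero count and is implied by that constant):

#include <cstdio>

// bsr32 - index of the highest set bit; x86 BSR leaves the destination
// undefined and sets ZF when the source is zero, hence the CMOV above.
static unsigned bsr32(unsigned x) {
  unsigned i = 31;
  while (!(x & (1u << i)))
    --i;
  return i;
}

static unsigned ctlz32(unsigned x) {
  const unsigned NumBits = 32;
  unsigned r = (x == 0) ? (2 * NumBits - 1)   // substituted via CMOV on ZF
                        : bsr32(x);
  return r ^ (NumBits - 1);                   // 31 - index for nonzero, 32 for zero
}

int main() {
  std::printf("%u %u %u\n", ctlz32(1), ctlz32(0x80000000u), ctlz32(0));  // 31 0 32
}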
- SmallVector<SDOperand, 4> Ops; + SmallVector<SDValue, 4> Ops; Ops.push_back(Op); Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT)); Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); @@ -5783,7 +5783,7 @@ SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) { return Op; } -SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) { MVT VT = Op.getValueType(); MVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); @@ -5799,7 +5799,7 @@ SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { Op = DAG.getNode(X86ISD::BSF, VTs, Op); // If src is zero (i.e. bsf sets ZF), returns NumBits. - SmallVector<SDOperand, 4> Ops; + SmallVector<SDValue, 4> Ops; Ops.push_back(Op); Ops.push_back(DAG.getConstant(NumBits, OpVT)); Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8)); @@ -5811,7 +5811,7 @@ SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) { return Op; } -SDOperand X86TargetLowering::LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) { MVT T = Op.getValueType(); unsigned Reg = 0; unsigned size = 0; @@ -5825,19 +5825,19 @@ SDOperand X86TargetLowering::LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG) { if (Subtarget->is64Bit()) { Reg = X86::RAX; size = 8; } else //Should go away when LowerType stuff lands - return SDOperand(ExpandATOMIC_CMP_SWAP(Op.Val, DAG), 0); + return SDValue(ExpandATOMIC_CMP_SWAP(Op.Val, DAG), 0); break; }; - SDOperand cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, - Op.getOperand(3), SDOperand()); - SDOperand Ops[] = { cpIn.getValue(0), + SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg, + Op.getOperand(3), SDValue()); + SDValue Ops[] = { cpIn.getValue(0), Op.getOperand(1), Op.getOperand(2), DAG.getTargetConstant(size, MVT::i8), cpIn.getValue(1) }; SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag); - SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); - SDOperand cpOut = + SDValue Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5); + SDValue cpOut = DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1)); return cpOut; } @@ -5845,16 +5845,16 @@ SDOperand X86TargetLowering::LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG) { SDNode* X86TargetLowering::ExpandATOMIC_CMP_SWAP(SDNode* Op, SelectionDAG &DAG) { MVT T = Op->getValueType(0); assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap"); - SDOperand cpInL, cpInH; + SDValue cpInL, cpInH; cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), DAG.getConstant(0, MVT::i32)); cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3), DAG.getConstant(1, MVT::i32)); cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX, - cpInL, SDOperand()); + cpInL, SDValue()); cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX, cpInH, cpInL.getValue(1)); - SDOperand swapInL, swapInH; + SDValue swapInL, swapInH; swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), DAG.getConstant(0, MVT::i32)); swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2), @@ -5863,25 +5863,25 @@ SDNode* X86TargetLowering::ExpandATOMIC_CMP_SWAP(SDNode* Op, SelectionDAG &DAG) swapInL, cpInH.getValue(1)); swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX, swapInH, swapInL.getValue(1)); - SDOperand Ops[] = { swapInH.getValue(0), + SDValue Ops[] = { swapInH.getValue(0), Op->getOperand(1), swapInH.getValue(1)}; SDVTList Tys = 
DAG.getVTList(MVT::Other, MVT::Flag); - SDOperand Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); - SDOperand cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, + SDValue Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3); + SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32, Result.getValue(1)); - SDOperand cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, + SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32, cpOutL.getValue(2)); - SDOperand OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; - SDOperand ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); - SDOperand Vals[2] = { ResultVal, cpOutH.getValue(1) }; + SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; + SDValue ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2); + SDValue Vals[2] = { ResultVal, cpOutH.getValue(1) }; return DAG.getMergeValues(Vals, 2).Val; } SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) { MVT T = Op->getValueType(0); assert (T == MVT::i32 && "Only know how to expand i32 Atomic Load Sub"); - SDOperand negOp = DAG.getNode(ISD::SUB, T, + SDValue negOp = DAG.getNode(ISD::SUB, T, DAG.getConstant(0, T), Op->getOperand(2)); return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, Op->getOperand(0), Op->getOperand(1), negOp, @@ -5891,7 +5891,7 @@ SDNode* X86TargetLowering::ExpandATOMIC_LOAD_SUB(SDNode* Op, SelectionDAG &DAG) /// LowerOperation - Provide custom lowering hooks for some operations. /// -SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { +SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) { switch (Op.getOpcode()) { default: assert(0 && "Should not custom lower this!"); case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); @@ -5937,7 +5937,7 @@ SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) { // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands. case ISD::READCYCLECOUNTER: - return SDOperand(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); + return SDValue(ExpandREADCYCLECOUNTER(Op.Val, DAG), 0); } } @@ -6092,7 +6092,7 @@ bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const { /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values /// are assumed to be legal. bool -X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT VT) const { +X86TargetLowering::isShuffleMaskLegal(SDValue Mask, MVT VT) const { // Only do shuffles on 128-bit vector types for now. if (VT.getSizeInBits() == 64) return false; return (Mask.Val->getNumOperands() <= 4 || @@ -6107,7 +6107,7 @@ X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT VT) const { } bool -X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps, +X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDValue> &BVOps, MVT EVT, SelectionDAG &DAG) const { unsigned NumElts = BVOps.size(); // Only do shuffles on 128-bit vector types for now. 
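ExpandATOMIC_LOAD_SUB above leans on a simple identity: lock xadd returns the previous value for additions and there is no subtracting counterpart, so an atomic subtraction of a value is issued as an atomic addition of its negation (0 - x). A minimal stand-alone illustration of the same identity, using std::atomic in place of the DAG nodes (the function name is mine, not from the patch):

    #include <atomic>
    #include <cstdint>

    // fetch_sub(v) and fetch_add(0 - v) leave the same value in memory and
    // return the same prior value; with wrapping unsigned arithmetic the
    // identity is exact, which is what the DAG-level rewrite relies on.
    std::uint32_t atomicSubViaAdd(std::atomic<std::uint32_t> &a,
                                  std::uint32_t v) {
      return a.fetch_add(0u - v);   // behaves exactly like a.fetch_sub(v)
    }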
@@ -6484,7 +6484,7 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, // X86 Optimization Hooks //===----------------------------------------------------------------------===// -void X86TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op, +void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -6531,21 +6531,21 @@ static bool isBaseAlignmentOfN(unsigned N, SDNode *Base, return false; } -static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask, +static bool EltsFromConsecutiveLoads(SDNode *N, SDValue PermMask, unsigned NumElems, MVT EVT, SDNode *&Base, SelectionDAG &DAG, MachineFrameInfo *MFI, const TargetLowering &TLI) { Base = NULL; for (unsigned i = 0; i < NumElems; ++i) { - SDOperand Idx = PermMask.getOperand(i); + SDValue Idx = PermMask.getOperand(i); if (Idx.getOpcode() == ISD::UNDEF) { if (!Base) return false; continue; } - SDOperand Elt = DAG.getShuffleScalarElt(N, i); + SDValue Elt = DAG.getShuffleScalarElt(N, i); if (!Elt.Val || (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.Val))) return false; @@ -6569,17 +6569,17 @@ static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask, /// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load /// if the load addresses are consecutive, non-overlapping, and in the right /// order. -static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, +static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI) { MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); MVT VT = N->getValueType(0); MVT EVT = VT.getVectorElementType(); - SDOperand PermMask = N->getOperand(2); + SDValue PermMask = N->getOperand(2); unsigned NumElems = PermMask.getNumOperands(); SDNode *Base = NULL; if (!EltsFromConsecutiveLoads(N, PermMask, NumElems, EVT, Base, DAG, MFI, TLI)) - return SDOperand(); + return SDValue(); LoadSDNode *LD = cast<LoadSDNode>(Base); if (isBaseAlignmentOfN(16, Base->getOperand(1).Val, TLI)) @@ -6591,14 +6591,14 @@ static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, } /// PerformBuildVectorCombine - build_vector 0,(load i64 / f64) -> movq / movsd. -static SDOperand PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG, +static SDValue PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG, const X86Subtarget *Subtarget, const TargetLowering &TLI) { unsigned NumOps = N->getNumOperands(); // Ignore single operand BUILD_VECTOR. if (NumOps == 1) - return SDOperand(); + return SDValue(); MVT VT = N->getValueType(0); MVT EVT = VT.getVectorElementType(); @@ -6606,20 +6606,20 @@ static SDOperand PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG, // We are looking for load i64 and zero extend. We want to transform // it before legalizer has a chance to expand it. Also look for i64 // BUILD_PAIR bit casted to f64. - return SDOperand(); + return SDValue(); // This must be an insertion into a zero vector. - SDOperand HighElt = N->getOperand(1); + SDValue HighElt = N->getOperand(1); if (!isZeroNode(HighElt)) - return SDOperand(); + return SDValue(); // Value must be a load. SDNode *Base = N->getOperand(0).Val; if (!isa<LoadSDNode>(Base)) { if (Base->getOpcode() != ISD::BIT_CONVERT) - return SDOperand(); + return SDValue(); Base = Base->getOperand(0).Val; if (!isa<LoadSDNode>(Base)) - return SDOperand(); + return SDValue(); } // Transform it into VZEXT_LOAD addr. 
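For a feel of where PerformBuildVectorCombine's pattern (a zero in the high element, a 64-bit load in the low one) comes from, here is a hedged source-level example; the function name is mine, and the claim that it selects to a single movq is an expectation, not something the hunk itself proves:

    #include <emmintrin.h>
    #include <cstdint>

    // Load 64 bits into the low half of an XMM register with the upper half
    // zeroed -- the build_vector 0,(load i64) shape the combine rewrites to
    // X86ISD::VZEXT_LOAD, i.e. a plain movq load.
    __m128i loadLowQword(const std::int64_t *p) {
      return _mm_loadl_epi64(reinterpret_cast<const __m128i *>(p));
    }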
@@ -6627,23 +6627,23 @@ static SDOperand PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG, // Load must not be an extload. if (LD->getExtensionType() != ISD::NON_EXTLOAD) - return SDOperand(); + return SDValue(); return DAG.getNode(X86ISD::VZEXT_LOAD, VT, LD->getChain(), LD->getBasePtr()); } /// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes. -static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, +static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, const X86Subtarget *Subtarget) { - SDOperand Cond = N->getOperand(0); + SDValue Cond = N->getOperand(0); // If we have SSE[12] support, try to form min/max nodes. if (Subtarget->hasSSE2() && (N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) { if (Cond.getOpcode() == ISD::SETCC) { // Get the LHS/RHS of the select. - SDOperand LHS = N->getOperand(1); - SDOperand RHS = N->getOperand(2); + SDValue LHS = N->getOperand(1); + SDValue RHS = N->getOperand(2); ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); unsigned Opcode = 0; @@ -6701,11 +6701,11 @@ static SDOperand PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, } - return SDOperand(); + return SDValue(); } /// PerformSTORECombine - Do target-specific dag combines on STORE nodes. -static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, +static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, const X86Subtarget *Subtarget) { // Turn load->store of MMX types into GPR load/stores. This avoids clobbering // the FP state in cases where an emms may be missing. @@ -6720,7 +6720,7 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, SDNode* LdVal = St->getValue().Val; LoadSDNode *Ld = 0; int TokenFactorIndex = -1; - SmallVector<SDOperand, 8> Ops; + SmallVector<SDValue, 8> Ops; SDNode* ChainVal = St->getChain().Val; // Must be a store of a load. We currently handle two cases: the load // is a direct child, and it's under an intervening TokenFactor. It is @@ -6740,11 +6740,11 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, if (Ld) { // If we are a 64-bit capable x86, lower to a single movq load/store pair. if (Subtarget->is64Bit()) { - SDOperand NewLd = DAG.getLoad(MVT::i64, Ld->getChain(), + SDValue NewLd = DAG.getLoad(MVT::i64, Ld->getChain(), Ld->getBasePtr(), Ld->getSrcValue(), Ld->getSrcValueOffset(), Ld->isVolatile(), Ld->getAlignment()); - SDOperand NewChain = NewLd.getValue(1); + SDValue NewChain = NewLd.getValue(1); if (TokenFactorIndex != -1) { Ops.push_back(NewChain); NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0], @@ -6756,19 +6756,19 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, } // Otherwise, lower to two 32-bit copies. 
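// On 32-bit targets the i64/f64 value is rebuilt from and to memory as two
// i32 halves: LoAddr/HiAddr below are the original address and address+4,
// the two loads feed two stores at the corresponding offsets, and the chains
// are tied back together through a TokenFactor. The value therefore never
// touches an MMX or x87 register, which is the point of this combine.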
- SDOperand LoAddr = Ld->getBasePtr(); - SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr, + SDValue LoAddr = Ld->getBasePtr(); + SDValue HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr, DAG.getConstant(4, MVT::i32)); - SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr, + SDValue LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr, Ld->getSrcValue(), Ld->getSrcValueOffset(), Ld->isVolatile(), Ld->getAlignment()); - SDOperand HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr, + SDValue HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr, Ld->getSrcValue(), Ld->getSrcValueOffset()+4, Ld->isVolatile(), MinAlign(Ld->getAlignment(), 4)); - SDOperand NewChain = LoLd.getValue(1); + SDValue NewChain = LoLd.getValue(1); if (TokenFactorIndex != -1) { Ops.push_back(LoLd); Ops.push_back(HiLd); @@ -6780,22 +6780,22 @@ static SDOperand PerformSTORECombine(SDNode *N, SelectionDAG &DAG, HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr, DAG.getConstant(4, MVT::i32)); - SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr, + SDValue LoSt = DAG.getStore(NewChain, LoLd, LoAddr, St->getSrcValue(), St->getSrcValueOffset(), St->isVolatile(), St->getAlignment()); - SDOperand HiSt = DAG.getStore(NewChain, HiLd, HiAddr, + SDValue HiSt = DAG.getStore(NewChain, HiLd, HiAddr, St->getSrcValue(), St->getSrcValueOffset()+4, St->isVolatile(), MinAlign(St->getAlignment(), 4)); return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt); } } - return SDOperand(); + return SDValue(); } /// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and /// X86ISD::FXOR nodes. -static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); // F[X]OR(0.0, x) -> x // F[X]OR(x, 0.0) -> x @@ -6805,11 +6805,11 @@ static SDOperand PerformFORCombine(SDNode *N, SelectionDAG &DAG) { if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) if (C->getValueAPF().isPosZero()) return N->getOperand(0); - return SDOperand(); + return SDValue(); } /// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. -static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { // FAND(0.0, x) -> 0.0 // FAND(x, 0.0) -> 0.0 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) @@ -6818,11 +6818,11 @@ static SDOperand PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1))) if (C->getValueAPF().isPosZero()) return N->getOperand(1); - return SDOperand(); + return SDValue(); } -SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, +SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; switch (N->getOpcode()) { @@ -6837,7 +6837,7 @@ SDOperand X86TargetLowering::PerformDAGCombine(SDNode *N, case X86ISD::FAND: return PerformFANDCombine(N, DAG); } - return SDOperand(); + return SDValue(); } //===----------------------------------------------------------------------===// @@ -6887,11 +6887,11 @@ LowerXConstraint(MVT ConstraintVT) const { /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. 
-void X86TargetLowering::LowerAsmOperandForConstraint(SDOperand Op, +void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, char Constraint, - std::vector<SDOperand>&Ops, + std::vector<SDValue>&Ops, SelectionDAG &DAG) const { - SDOperand Result(0, 0); + SDValue Result(0, 0); switch (Constraint) { default: break; diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 125a30dd66..8632f3c0c7 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -325,7 +325,7 @@ namespace llvm { /// getPICJumpTableRelocaBase - Returns relocation base for the given PIC /// jumptable. - SDOperand getPICJumpTableRelocBase(SDOperand Table, + SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const; // Return the number of bytes that a function should pop when it returns (in @@ -357,7 +357,7 @@ namespace llvm { /// LowerOperation - Provide custom lowering hooks for some operations. /// - virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG); + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG); /// ReplaceNodeResults - Replace a node with an illegal result type /// with a new node built out of custom code. @@ -365,7 +365,7 @@ namespace llvm { virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG); - virtual SDOperand PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB); @@ -376,12 +376,12 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; /// getSetCCResultType - Return the ISD::SETCC ValueType - virtual MVT getSetCCResultType(const SDOperand &) const; + virtual MVT getSetCCResultType(const SDValue &) const; /// computeMaskedBitsForTargetNode - Determine which of the bits specified /// in Mask are known to be either zero or one and return them in the /// KnownZero/KnownOne bitsets. - virtual void computeMaskedBitsForTargetNode(const SDOperand Op, + virtual void computeMaskedBitsForTargetNode(const SDValue Op, const APInt &Mask, APInt &KnownZero, APInt &KnownOne, @@ -391,7 +391,7 @@ namespace llvm { virtual bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const; - SDOperand getReturnAddressFrameIndex(SelectionDAG &DAG); + SDValue getReturnAddressFrameIndex(SelectionDAG &DAG); ConstraintType getConstraintType(const std::string &Constraint) const; @@ -403,9 +403,9 @@ namespace llvm { /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops /// vector. If it is invalid, don't add anything to Ops. - virtual void LowerAsmOperandForConstraint(SDOperand Op, + virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter, - std::vector<SDOperand> &Ops, + std::vector<SDValue> &Ops, SelectionDAG &DAG) const; /// getRegForInlineAsmConstraint - Given a physical register constraint @@ -430,13 +430,13 @@ namespace llvm { /// support *some* VECTOR_SHUFFLE operations, those with specific masks. /// By default, if a target supports the VECTOR_SHUFFLE node, all mask /// values are assumed to be legal. - virtual bool isShuffleMaskLegal(SDOperand Mask, MVT VT) const; + virtual bool isShuffleMaskLegal(SDValue Mask, MVT VT) const; /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is /// used by Targets can use this to indicate if there is a suitable /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant /// pool entry. 
- virtual bool isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps, + virtual bool isVectorClearMaskLegal(const std::vector<SDValue> &BVOps, MVT EVT, SelectionDAG &DAG) const; /// ShouldShrinkFPConstant - If true, then instruction selection should @@ -452,8 +452,8 @@ namespace llvm { /// IsEligibleForTailCallOptimization - Check whether the call is eligible /// for tail call optimization. Target which want to do tail call /// optimization should implement this function. - virtual bool IsEligibleForTailCallOptimization(SDOperand Call, - SDOperand Ret, + virtual bool IsEligibleForTailCallOptimization(SDValue Call, + SDValue Ret, SelectionDAG &DAG) const; virtual const X86Subtarget* getSubtarget() { @@ -483,87 +483,87 @@ namespace llvm { bool X86ScalarSSEf32; bool X86ScalarSSEf64; - SDNode *LowerCallResult(SDOperand Chain, SDOperand InFlag, SDNode*TheCall, + SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, SDNode*TheCall, unsigned CallingConv, SelectionDAG &DAG); - SDOperand LowerMemArgument(SDOperand Op, SelectionDAG &DAG, + SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG, const CCValAssign &VA, MachineFrameInfo *MFI, - unsigned CC, SDOperand Root, unsigned i); + unsigned CC, SDValue Root, unsigned i); - SDOperand LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG, - const SDOperand &StackPtr, - const CCValAssign &VA, SDOperand Chain, - SDOperand Arg); + SDValue LowerMemOpCallTo(SDValue Op, SelectionDAG &DAG, + const SDValue &StackPtr, + const CCValAssign &VA, SDValue Chain, + SDValue Arg); // Call lowering helpers. - bool IsCalleePop(SDOperand Op); + bool IsCalleePop(SDValue Op); bool CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall); bool CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall); - SDOperand EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDOperand &OutRetAddr, - SDOperand Chain, bool IsTailCall, bool Is64Bit, + SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr, + SDValue Chain, bool IsTailCall, bool Is64Bit, int FPDiff); - CCAssignFn *CCAssignFnForNode(SDOperand Op) const; - NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDOperand Op); + CCAssignFn *CCAssignFnForNode(SDValue Op) const; + NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op); unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG); - std::pair<SDOperand,SDOperand> FP_TO_SINTHelper(SDOperand Op, + std::pair<SDValue,SDValue> FP_TO_SINTHelper(SDValue Op, SelectionDAG &DAG); - SDOperand LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerVECTOR_SHUFFLE(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerEXTRACT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerGlobalTLSAddress(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerShift(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFABS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFNEG(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFCOPYSIGN(SDOperand 
Op, SelectionDAG &DAG); - SDOperand LowerSETCC(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerVSETCC(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerSELECT(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerBRCOND(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerMEMSET(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerJumpTable(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerDYNAMIC_STACKALLOC(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerVAARG(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerVACOPY(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerINTRINSIC_WO_CHAIN(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerRETURNADDR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFRAMEADDR(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFRAME_TO_ARGS_OFFSET(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerEH_RETURN(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerTRAMPOLINE(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerFLT_ROUNDS_(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerCTLZ(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerCTTZ(SDOperand Op, SelectionDAG &DAG); - SDOperand LowerCMP_SWAP(SDOperand Op, SelectionDAG &DAG); + SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG); + SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG); + SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG); + SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG); + SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG); + SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG); + SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG); + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG); + SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG); + SDValue LowerShift(SDValue Op, SelectionDAG &DAG); + SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG); + SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG); + SDValue LowerFABS(SDValue Op, SelectionDAG &DAG); + SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG); + SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG); + SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG); + SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG); + SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG); + SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG); + SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG); + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG); + SDValue LowerCALL(SDValue Op, SelectionDAG &DAG); + SDValue LowerRET(SDValue Op, SelectionDAG &DAG); + SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG); + SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG); + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG); + SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG); + SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG); + SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG); + SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG); + SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG); + SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG); + SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG); + SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG); + SDValue 
LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG); + SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG); + SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG); + SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG); SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG); SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG); SDNode *ExpandATOMIC_CMP_SWAP(SDNode *N, SelectionDAG &DAG); SDNode *ExpandATOMIC_LOAD_SUB(SDNode *N, SelectionDAG &DAG); - SDOperand EmitTargetCodeForMemset(SelectionDAG &DAG, - SDOperand Chain, - SDOperand Dst, SDOperand Src, - SDOperand Size, unsigned Align, + SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, + SDValue Chain, + SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, const Value *DstSV, uint64_t DstSVOff); - SDOperand EmitTargetCodeForMemcpy(SelectionDAG &DAG, - SDOperand Chain, - SDOperand Dst, SDOperand Src, - SDOperand Size, unsigned Align, + SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, + SDValue Chain, + SDValue Dst, SDValue Src, + SDValue Size, unsigned Align, bool AlwaysInline, const Value *DstSV, uint64_t DstSVOff, const Value *SrcSV, uint64_t SrcSVOff); diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 8cf268e4a8..61dcfa95b3 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -2262,12 +2262,12 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, const TargetOperandInfo &TOI = TID.OpInfo[Index]; const TargetRegisterClass *RC = TOI.isLookupPtrRegClass() ? getPointerRegClass() : RI.getRegClass(TOI.RegClass); - std::vector<SDOperand> AddrOps; - std::vector<SDOperand> BeforeOps; - std::vector<SDOperand> AfterOps; + std::vector<SDValue> AddrOps; + std::vector<SDValue> BeforeOps; + std::vector<SDValue> AfterOps; unsigned NumOps = N->getNumOperands(); for (unsigned i = 0; i != NumOps-1; ++i) { - SDOperand Op = N->getOperand(i); + SDValue Op = N->getOperand(i); if (i >= Index && i < Index+4) AddrOps.push_back(Op); else if (i < Index) @@ -2275,7 +2275,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, else if (i > Index) AfterOps.push_back(Op); } - SDOperand Chain = N->getOperand(NumOps-1); + SDValue Chain = N->getOperand(NumOps-1); AddrOps.push_back(Chain); // Emit the load instruction. @@ -2306,7 +2306,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, VTs.push_back(VT); } if (Load) - BeforeOps.push_back(SDOperand(Load, 0)); + BeforeOps.push_back(SDValue(Load, 0)); std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps)); SDNode *NewNode= DAG.getTargetNode(Opc, VTs, &BeforeOps[0], BeforeOps.size()); NewNodes.push_back(NewNode); @@ -2314,7 +2314,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N, // Emit the store instruction. if (FoldedStore) { AddrOps.pop_back(); - AddrOps.push_back(SDOperand(NewNode, 0)); + AddrOps.push_back(SDValue(NewNode, 0)); AddrOps.push_back(Chain); bool isAligned = (RI.getStackAlignment() >= 16) || RI.needsStackRealignment(MF); diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp index e5d4947465..da0d287817 100644 --- a/lib/Transforms/Scalar/CodeGenPrepare.cpp +++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp @@ -963,7 +963,7 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS, } // Compute the constraint code and ConstraintType to use. 
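// CodeGenPrepare runs on LLVM IR before any SelectionDAG has been built, so
// there is no operand value to offer the constraint logic here; the call
// below passes a default-constructed handle, which is exactly the spot where
// the spelling changes from SDOperand() to SDValue().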
- TLI->ComputeConstraintToUse(OpInfo, SDOperand()); + TLI->ComputeConstraintToUse(OpInfo, SDValue()); if (OpInfo.ConstraintType == TargetLowering::C_Memory && OpInfo.isIndirect) {
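Seen end to end, the change in these files is purely a spelling update: SDOperand becomes SDValue, default-constructed values keep meaning "nothing was folded", and the .Val node pointer is left alone. A tiny self-contained sketch of the post-rename idiom (the stub types and the combine function are mine, standing in for the real SelectionDAG classes in llvm/CodeGen/SelectionDAGNodes.h):

    // Minimal stand-ins for the renamed type, only to show the idiom.
    struct SDNodeStub { /* node payload elided */ };

    struct SDValueStub {
      SDNodeStub *Val;      // same role the member still plays in this commit
      unsigned ResNo;
      SDValueStub() : Val(0), ResNo(0) {}                       // "no change"
      SDValueStub(SDNodeStub *N, unsigned R) : Val(N), ResNo(R) {}
    };

    // A combine hook written against the new spelling: return a default
    // SDValueStub to say nothing was folded, or wrap the replacement node.
    SDValueStub combineStub(SDNodeStub *N, bool worthCombining) {
      if (!worthCombining)
        return SDValueStub();        // previously: return SDOperand();
      return SDValueStub(N, 0);      // previously: return SDOperand(N, 0);
    }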