Diffstat (limited to 'lib/Target/X86')
-rw-r--r--   lib/Target/X86/X86FastISel.cpp              | 27
-rw-r--r--   lib/Target/X86/X86FloatingPointRegKill.cpp  |  5
-rw-r--r--   lib/Target/X86/X86ISelLowering.cpp          | 19
-rw-r--r--   lib/Target/X86/X86InstrInfo.cpp             |  3
4 files changed, 31 insertions, 23 deletions
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index b98a9738c6..5e9a39f056 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -272,7 +272,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
                                    const X86AddressMode &AM) {
   // Handle 'null' like i32/i64 0.
   if (isa<ConstantPointerNull>(Val))
-    Val = Constant::getNullValue(TD.getIntPtrType());
+    Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
 
   // If this is a store of a simple constant, fold the constant into the store.
   if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
@@ -672,7 +672,7 @@ bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, EVT VT) {
 
   // Handle 'null' like i32/i64 0.
   if (isa<ConstantPointerNull>(Op1))
-    Op1 = Constant::getNullValue(TD.getIntPtrType());
+    Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
 
   // We have two options: compare with register or immediate. If the RHS of
   // the compare is an immediate that we can fold into this compare, use
@@ -773,8 +773,8 @@ bool X86FastISel::X86SelectCmp(Instruction *I) {
 
 bool X86FastISel::X86SelectZExt(Instruction *I) {
   // Handle zero-extension from i1 to i8, which is common.
-  if (I->getType() == Type::Int8Ty &&
-      I->getOperand(0)->getType() == Type::Int1Ty) {
+  if (I->getType() == Type::getInt8Ty(I->getContext()) &&
+      I->getOperand(0)->getType() == Type::getInt1Ty(I->getContext())) {
     unsigned ResultReg = getRegForValue(I->getOperand(0));
     if (ResultReg == 0) return false;
     // Set the high bits to zero.
@@ -935,7 +935,7 @@ bool X86FastISel::X86SelectBranch(Instruction *I) {
 bool X86FastISel::X86SelectShift(Instruction *I) {
   unsigned CReg = 0, OpReg = 0, OpImm = 0;
   const TargetRegisterClass *RC = NULL;
-  if (I->getType() == Type::Int8Ty) {
+  if (I->getType() == Type::getInt8Ty(I->getContext())) {
     CReg = X86::CL;
     RC = &X86::GR8RegClass;
     switch (I->getOpcode()) {
@@ -944,7 +944,7 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
     case Instruction::Shl: OpReg = X86::SHL8rCL; OpImm = X86::SHL8ri; break;
     default: return false;
     }
-  } else if (I->getType() == Type::Int16Ty) {
+  } else if (I->getType() == Type::getInt16Ty(I->getContext())) {
     CReg = X86::CX;
     RC = &X86::GR16RegClass;
     switch (I->getOpcode()) {
@@ -953,7 +953,7 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
     case Instruction::Shl: OpReg = X86::SHL16rCL; OpImm = X86::SHL16ri; break;
     default: return false;
     }
-  } else if (I->getType() == Type::Int32Ty) {
+  } else if (I->getType() == Type::getInt32Ty(I->getContext())) {
     CReg = X86::ECX;
     RC = &X86::GR32RegClass;
     switch (I->getOpcode()) {
@@ -962,7 +962,7 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
     case Instruction::Shl: OpReg = X86::SHL32rCL; OpImm = X86::SHL32ri; break;
     default: return false;
     }
-  } else if (I->getType() == Type::Int64Ty) {
+  } else if (I->getType() == Type::getInt64Ty(I->getContext())) {
     CReg = X86::RCX;
     RC = &X86::GR64RegClass;
     switch (I->getOpcode()) {
@@ -1044,9 +1044,10 @@ bool X86FastISel::X86SelectSelect(Instruction *I) {
 
 bool X86FastISel::X86SelectFPExt(Instruction *I) {
   // fpext from float to double.
-  if (Subtarget->hasSSE2() && I->getType() == Type::DoubleTy) {
+  if (Subtarget->hasSSE2() &&
+      I->getType() == Type::getDoubleTy(I->getContext())) {
     Value *V = I->getOperand(0);
-    if (V->getType() == Type::FloatTy) {
+    if (V->getType() == Type::getFloatTy(I->getContext())) {
       unsigned OpReg = getRegForValue(V);
       if (OpReg == 0) return false;
       unsigned ResultReg = createResultReg(X86::FR64RegisterClass);
@@ -1061,9 +1062,9 @@ bool X86FastISel::X86SelectFPExt(Instruction *I) {
 
 bool X86FastISel::X86SelectFPTrunc(Instruction *I) {
   if (Subtarget->hasSSE2()) {
-    if (I->getType() == Type::FloatTy) {
+    if (I->getType() == Type::getFloatTy(I->getContext())) {
       Value *V = I->getOperand(0);
-      if (V->getType() == Type::DoubleTy) {
+      if (V->getType() == Type::getDoubleTy(I->getContext())) {
         unsigned OpReg = getRegForValue(V);
         if (OpReg == 0) return false;
         unsigned ResultReg = createResultReg(X86::FR32RegisterClass);
@@ -1230,7 +1231,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
   // Handle *simple* calls for now.
   const Type *RetTy = CS.getType();
   EVT RetVT;
-  if (RetTy == Type::VoidTy)
+  if (RetTy == Type::getVoidTy(I->getContext()))
     RetVT = MVT::isVoid;
   else if (!isTypeLegal(RetTy, RetVT, true))
     return false;
diff --git a/lib/Target/X86/X86FloatingPointRegKill.cpp b/lib/Target/X86/X86FloatingPointRegKill.cpp
index 292f8f432d..3e0385c79c 100644
--- a/lib/Target/X86/X86FloatingPointRegKill.cpp
+++ b/lib/Target/X86/X86FloatingPointRegKill.cpp
@@ -118,9 +118,10 @@ bool FPRegKiller::runOnMachineFunction(MachineFunction &MF) {
            !ContainsFPCode && SI != E; ++SI) {
         for (BasicBlock::const_iterator II = SI->begin();
              (PN = dyn_cast<PHINode>(II)); ++II) {
-          if (PN->getType()==Type::X86_FP80Ty ||
+          if (PN->getType()==Type::getX86_FP80Ty(LLVMBB->getContext()) ||
               (!Subtarget.hasSSE1() && PN->getType()->isFloatingPoint()) ||
-              (!Subtarget.hasSSE2() && PN->getType()==Type::DoubleTy)) {
+              (!Subtarget.hasSSE2() &&
+               PN->getType()==Type::getDoubleTy(LLVMBB->getContext()))) {
             ContainsFPCode = true;
             break;
           }
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 7507eb2a1b..ca6ff929a7 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -5831,7 +5831,7 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
     if (const char *bzeroEntry = V &&
         V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
       EVT IntPtr = getPointerTy();
-      const Type *IntPtrTy = TD->getIntPtrType();
+      const Type *IntPtrTy = TD->getIntPtrType(*DAG.getContext());
       TargetLowering::ArgListTy Args;
       TargetLowering::ArgListEntry Entry;
       Entry.Node = Dst;
@@ -5840,7 +5840,8 @@ X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
       Entry.Node = Size;
       Args.push_back(Entry);
       std::pair<SDValue,SDValue> CallResult =
-        LowerCallTo(Chain, Type::VoidTy, false, false, false, false,
+        LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()),
+                    false, false, false, false,
                     0, CallingConv::C, false, /*isReturnValueUsed=*/false,
                     DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG, dl);
       return CallResult.second;
@@ -7159,7 +7160,8 @@ bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
 
 bool X86TargetLowering::isZExtFree(const Type *Ty1, const Type *Ty2) const {
   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
-  return Ty1 == Type::Int32Ty && Ty2 == Type::Int64Ty && Subtarget->is64Bit();
+  return Ty1 == Type::getInt32Ty(Ty1->getContext()) &&
+         Ty2 == Type::getInt64Ty(Ty1->getContext()) && Subtarget->is64Bit();
 }
 
 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
@@ -8768,7 +8770,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
       return LowerToBSwap(CI);
     }
     // rorw $$8, ${0:w} --> llvm.bswap.i16
-    if (CI->getType() == Type::Int16Ty &&
+    if (CI->getType() == Type::getInt16Ty(CI->getContext()) &&
        AsmPieces.size() == 3 &&
        AsmPieces[0] == "rorw" &&
        AsmPieces[1] == "$$8," &&
@@ -8778,7 +8780,8 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
     }
     break;
   case 3:
-    if (CI->getType() == Type::Int64Ty && Constraints.size() >= 2 &&
+    if (CI->getType() == Type::getInt64Ty(CI->getContext()) &&
+        Constraints.size() >= 2 &&
         Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
         Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
       // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
@@ -8896,7 +8899,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
     // 32-bit signed value
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      const ConstantInt *CI = C->getConstantIntValue();
-      if (CI->isValueValidForType(Type::Int32Ty, C->getSExtValue())) {
+      if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+                                  C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
@@ -8910,7 +8914,8 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
     // 32-bit unsigned value
     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      const ConstantInt *CI = C->getConstantIntValue();
-      if (CI->isValueValidForType(Type::Int32Ty, C->getZExtValue())) {
+      if (CI->isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
+                                  C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 297c4dd175..6e8561d8e4 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -2299,7 +2299,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
 
     // Create a v4i32 constant-pool entry.
     MachineConstantPool &MCP = *MF.getConstantPool();
-    const VectorType *Ty = VectorType::get(Type::Int32Ty, 4);
+    const VectorType *Ty =
+          VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
     Constant *C = LoadMI->getOpcode() == X86::V_SET0 ?
                     Constant::getNullValue(Ty) :
                     Constant::getAllOnesValue(Ty);
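For context, the pattern applied throughout this commit replaces the global type singletons (Type::Int32Ty, Type::VoidTy, and so on) with per-context accessors such as Type::getInt32Ty(Context), so every type lookup names the LLVMContext that owns it. The sketch below is illustrative only and is not part of the commit; the helper name is made up, and the header paths assume the pre-3.0 LLVM source layout used by this tree.

// Illustrative sketch, not from the commit: types are obtained from an
// explicit LLVMContext instead of process-wide singletons.
#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Constants.h"

using namespace llvm;

// Old style (removed by this commit):  const Type *Ty = Type::Int32Ty;
// New style: look the type up in the context that owns the IR.
static Constant *makeZeroI32(LLVMContext &Ctx) {
  const Type *Ty = Type::getInt32Ty(Ctx);  // per-context i32 type
  return Constant::getNullValue(Ty);       // i32 0 in that context
}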