author     Chris Lattner <sabre@nondot.org>    2011-07-18 04:54:35 +0000
committer  Chris Lattner <sabre@nondot.org>    2011-07-18 04:54:35 +0000
commit     db125cfaf57cc83e7dd7453de2d509bc8efd0e5e (patch)
tree       a163ac0f83da7be3f9675a122a6144b12418be09 /lib/Analysis
parent     4b3d5469fb7c25504fa20dc65640f02d79675d48 (diff)
land David Blaikie's patch to de-constify Type, with a few tweaks.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@135375 91177308-0d34-0410-b5e6-96231b3b80d8
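For context, the whole diff applies one mechanical pattern: parameters and locals declared as pointer-to-const Type become plain Type pointers, on the understanding that LLVM Type objects are uniqued and never mutated once created, so the const qualifier carried no information. A minimal self-contained sketch of the before/after shape, using a hypothetical MyType stand-in rather than real LLVM headers:

    // Illustrative sketch only; MyType and the describe* helpers are
    // hypothetical stand-ins, not code from this commit.
    #include <cstdio>

    struct MyType {                // stands in for llvm::Type: built once, never mutated
      const unsigned BitWidth;
      explicit MyType(unsigned W) : BitWidth(W) {}
    };

    // Before the patch: callers wrote pointer-to-const everywhere.
    static void describeBefore(const MyType *Ty) { std::printf("i%u\n", Ty->BitWidth); }

    // After the patch: since the object is immutable anyway, the const
    // qualifier is dropped and plain pointers are passed, as in the hunks below.
    static void describeAfter(MyType *Ty) { std::printf("i%u\n", Ty->BitWidth); }

    int main() {
      MyType I32(32);
      describeBefore(&I32);
      describeAfter(&I32);
      return 0;
    }

In the real tree the same signature change ripples through every file listed in the diffstat below.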
Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/AliasAnalysis.cpp             |   2
-rw-r--r--  lib/Analysis/AliasAnalysisEvaluator.cpp    |   6
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp        |   4
-rw-r--r--  lib/Analysis/ConstantFolding.cpp           |  62
-rw-r--r--  lib/Analysis/IPA/FindUsedTypes.cpp         |   4
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp       |  18
-rw-r--r--  lib/Analysis/LazyValueInfo.cpp             |   6
-rw-r--r--  lib/Analysis/Lint.cpp                      |   8
-rw-r--r--  lib/Analysis/Loads.cpp                     |   6
-rw-r--r--  lib/Analysis/MemoryBuiltins.cpp            |  16
-rw-r--r--  lib/Analysis/MemoryDependenceAnalysis.cpp  |   2
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp           | 142
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp   |  64
-rw-r--r--  lib/Analysis/ValueTracking.cpp             |  30
14 files changed, 185 insertions, 185 deletions
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index c189a00429..bfa02e0e1f 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -341,7 +341,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// getTypeStoreSize - Return the TargetData store size for the given type,
/// if known, or a conservative value otherwise.
///
-uint64_t AliasAnalysis::getTypeStoreSize(const Type *Ty) {
+uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
}
diff --git a/lib/Analysis/AliasAnalysisEvaluator.cpp b/lib/Analysis/AliasAnalysisEvaluator.cpp
index 1afc1b71d9..37271b94a2 100644
--- a/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -171,12 +171,12 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator I1 = Pointers.begin(), E = Pointers.end();
I1 != E; ++I1) {
uint64_t I1Size = AliasAnalysis::UnknownSize;
- const Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
+ Type *I1ElTy = cast<PointerType>((*I1)->getType())->getElementType();
if (I1ElTy->isSized()) I1Size = AA.getTypeStoreSize(I1ElTy);
for (SetVector<Value *>::iterator I2 = Pointers.begin(); I2 != I1; ++I2) {
uint64_t I2Size = AliasAnalysis::UnknownSize;
- const Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
+ Type *I2ElTy =cast<PointerType>((*I2)->getType())->getElementType();
if (I2ElTy->isSized()) I2Size = AA.getTypeStoreSize(I2ElTy);
switch (AA.alias(*I1, I1Size, *I2, I2Size)) {
@@ -207,7 +207,7 @@ bool AAEval::runOnFunction(Function &F) {
for (SetVector<Value *>::iterator V = Pointers.begin(), Ve = Pointers.end();
V != Ve; ++V) {
uint64_t Size = AliasAnalysis::UnknownSize;
- const Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
+ Type *ElTy = cast<PointerType>((*V)->getType())->getElementType();
if (ElTy->isSized()) Size = AA.getTypeStoreSize(ElTy);
switch (AA.getModRefInfo(*C, *V, Size)) {
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 8330ea7c70..116076ce2a 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -100,7 +100,7 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD) {
- const Type *AccessTy;
+ Type *AccessTy;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
if (!GV->hasDefinitiveInitializer())
return AliasAnalysis::UnknownSize;
@@ -317,7 +317,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
E = GEPOp->op_end(); I != E; ++I) {
Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
- if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 7fca17eb69..171f924f62 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -43,11 +43,11 @@ using namespace llvm;
/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// TargetData. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
-static Constant *FoldBitCast(Constant *C, const Type *DestTy,
+static Constant *FoldBitCast(Constant *C, Type *DestTy,
const TargetData &TD) {
// This only handles casts to vectors currently.
- const VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
+ VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);
@@ -69,8 +69,8 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
- const Type *SrcEltTy = CV->getType()->getElementType();
- const Type *DstEltTy = DestVTy->getElementType();
+ Type *SrcEltTy = CV->getType()->getElementType();
+ Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
@@ -85,7 +85,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
if (DstEltTy->isFloatingPointTy()) {
// Fold to an vector of integers with same size as our FP type.
unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
- const Type *DestIVTy =
+ Type *DestIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
@@ -99,7 +99,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
- const Type *SrcIVTy =
+ Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask VMCore to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
@@ -212,11 +212,11 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.
- if (const StructType *ST = dyn_cast<StructType>(*GTI)) {
+ if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
} else {
- const SequentialType *SQT = cast<SequentialType>(*GTI);
+ SequentialType *SQT = cast<SequentialType>(*GTI);
Offset += TD.getTypeAllocSize(SQT->getElementType())*CI->getSExtValue();
}
}
@@ -354,8 +354,8 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const TargetData &TD) {
- const Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
- const IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
+ Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
+ IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
@@ -363,7 +363,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// and then bitcast the result. This can be useful for union cases. Note
// that address spaces don't matter here since we're not going to result in
// an actual new load.
- const Type *MapTy;
+ Type *MapTy;
if (LoadTy->isFloatTy())
MapTy = Type::getInt32PtrTy(C->getContext());
else if (LoadTy->isDoubleTy())
@@ -443,7 +443,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
std::string Str;
if (TD && GetConstantStringInfo(CE, Str) && !Str.empty()) {
unsigned StrLen = Str.length();
- const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
+ Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
// Replace load with immediate integer if the result is an integer or fp
// value.
@@ -478,7 +478,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
if (GlobalVariable *GV =
dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
- const Type *ResTy = cast<PointerType>(C->getType())->getElementType();
+ Type *ResTy = cast<PointerType>(C->getType())->getElementType();
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(ResTy);
if (isa<UndefValue>(GV->getInitializer()))
@@ -537,10 +537,10 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
- const Type *ResultTy,
+ Type *ResultTy,
const TargetData *TD) {
if (!TD) return 0;
- const Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
bool Any = false;
SmallVector<Constant*, 32> NewIdxs;
@@ -572,13 +572,13 @@ static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
- const Type *ResultTy,
+ Type *ResultTy,
const TargetData *TD) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
return 0;
- const Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
@@ -649,10 +649,10 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// we eliminate over-indexing of the notional static type array bounds.
// This makes it easy to determine if the getelementptr is "inbounds".
// Also, this helps GlobalOpt do SROA on GlobalVariables.
- const Type *Ty = Ptr->getType();
+ Type *Ty = Ptr->getType();
SmallVector<Constant*, 32> NewIdxs;
do {
- if (const SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
+ if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
if (ATy->isPointerTy()) {
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
@@ -665,7 +665,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
- const IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
+ IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
if (ElemSize == 0)
// The element size is 0. This may be [0 x Ty]*, so just use a zero
// index for this level and proceed to the next level to see if it can
@@ -679,7 +679,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
}
Ty = ATy->getElementType();
- } else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
// Determine which field of the struct the offset points into. The
// getZExtValue is at least as safe as the StructLayout API because we
// know the offset is within the struct at this point.
@@ -814,7 +814,7 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
-Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
Constant* const* Ops, unsigned NumOps,
const TargetData *TD) {
// Handle easy binops first.
@@ -912,7 +912,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
if (TD && Ops1->isNullValue()) {
- const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
@@ -934,7 +934,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
- const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
@@ -987,7 +987,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
// addressing...
gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
for (++I; I != E; ++I)
- if (const StructType *STy = dyn_cast<StructType>(*I)) {
+ if (StructType *STy = dyn_cast<StructType>(*I)) {
ConstantInt *CU = cast<ConstantInt>(I.getOperand());
assert(CU->getZExtValue() < STy->getNumElements() &&
"Struct index out of range!");
@@ -1002,7 +1002,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
return 0;
}
} else if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
- if (const ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
+ if (ArrayType *ATy = dyn_cast<ArrayType>(*I)) {
if (CI->getZExtValue() >= ATy->getNumElements())
return 0;
if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
@@ -1013,7 +1013,7 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
C = UndefValue::get(ATy->getElementType());
else
return 0;
- } else if (const VectorType *VTy = dyn_cast<VectorType>(*I)) {
+ } else if (VectorType *VTy = dyn_cast<VectorType>(*I)) {
if (CI->getZExtValue() >= VTy->getNumElements())
return 0;
if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
@@ -1101,7 +1101,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}
static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
- const Type *Ty) {
+ Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
if (sys::llvm_fenv_testexcept()) {
@@ -1118,7 +1118,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
}
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
- double V, double W, const Type *Ty) {
+ double V, double W, Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V, W);
if (sys::llvm_fenv_testexcept()) {
@@ -1143,7 +1143,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
static Constant *ConstantFoldConvertToInt(ConstantFP *Op, bool roundTowardZero,
- const Type *Ty) {
+ Type *Ty) {
assert(Op && "Called with NULL operand");
APFloat Val(Op->getValueAPF());
@@ -1172,7 +1172,7 @@ llvm::ConstantFoldCall(Function *F,
if (!F->hasName()) return 0;
StringRef Name = F->getName();
- const Type *Ty = F->getReturnType();
+ Type *Ty = F->getReturnType();
if (NumOperands == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
diff --git a/lib/Analysis/IPA/FindUsedTypes.cpp b/lib/Analysis/IPA/FindUsedTypes.cpp
index 6535786668..e9df3ca010 100644
--- a/lib/Analysis/IPA/FindUsedTypes.cpp
+++ b/lib/Analysis/IPA/FindUsedTypes.cpp
@@ -29,7 +29,7 @@ INITIALIZE_PASS(FindUsedTypes, "print-used-types",
// IncorporateType - Incorporate one type and all of its subtypes into the
// collection of used types.
//
-void FindUsedTypes::IncorporateType(const Type *Ty) {
+void FindUsedTypes::IncorporateType(Type *Ty) {
// If ty doesn't already exist in the used types map, add it now, otherwise
// return.
if (!UsedTypes.insert(Ty)) return; // Already contain Ty.
@@ -94,7 +94,7 @@ bool FindUsedTypes::runOnModule(Module &m) {
//
void FindUsedTypes::print(raw_ostream &OS, const Module *M) const {
OS << "Types in use by this module:\n";
- for (SetVector<const Type *>::const_iterator I = UsedTypes.begin(),
+ for (SetVector<Type *>::const_iterator I = UsedTypes.begin(),
E = UsedTypes.end(); I != E; ++I) {
OS << " " << **I << '\n';
}
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 8709f6bf9d..135be6d8b1 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -1372,7 +1372,7 @@ Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD,
return ::SimplifyXorInst(Op0, Op1, TD, DT, RecursionLimit);
}
-static const Type *GetCompareTy(Value *Op) {
+static Type *GetCompareTy(Value *Op) {
return CmpInst::makeCmpResultType(Op->getType());
}
@@ -1413,8 +1413,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Pred = CmpInst::getSwappedPredicate(Pred);
}
- const Type *ITy = GetCompareTy(LHS); // The return type.
- const Type *OpTy = LHS->getType(); // The operand type.
+ Type *ITy = GetCompareTy(LHS); // The return type.
+ Type *OpTy = LHS->getType(); // The operand type.
// icmp X, X -> true/false
// X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
@@ -1593,8 +1593,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
Instruction *LI = cast<CastInst>(LHS);
Value *SrcOp = LI->getOperand(0);
- const Type *SrcTy = SrcOp->getType();
- const Type *DstTy = LI->getType();
+ Type *SrcTy = SrcOp->getType();
+ Type *DstTy = LI->getType();
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
@@ -2222,7 +2222,7 @@ Value *llvm::SimplifySelectInst(Value *CondVal, Value *TrueVal, Value *FalseVal,
Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
const TargetData *TD, const DominatorTree *) {
// The type of the GEP pointer operand.
- const PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
+ PointerType *PtrTy = cast<PointerType>(Ops[0]->getType());
// getelementptr P -> P.
if (NumOps == 1)
@@ -2230,9 +2230,9 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
if (isa<UndefValue>(Ops[0])) {
// Compute the (pointer) type returned by the GEP instruction.
- const Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, &Ops[1],
+ Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, &Ops[1],
NumOps-1);
- const Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
+ Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
return UndefValue::get(GEPTy);
}
@@ -2243,7 +2243,7 @@ Value *llvm::SimplifyGEPInst(Value *const *Ops, unsigned NumOps,
return Ops[0];
// getelementptr P, N -> P if P points to a type of zero size.
if (TD) {
- const Type *Ty = PtrTy->getElementType();
+ Type *Ty = PtrTy->getElementType();
if (Ty->isSized() && TD->getTypeAllocSize(Ty) == 0)
return Ops[0];
}
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index 6e27597827..f80595c7db 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -630,7 +630,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
if (BB == &BB->getParent()->getEntryBlock()) {
assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
if (NotNull) {
- const PointerType *PTy = cast<PointerType>(Val->getType());
+ PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
} else {
Result.markOverdefined();
@@ -658,7 +658,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
// If we previously determined that this is a pointer that can't be null
// then return that rather than giving up entirely.
if (NotNull) {
- const PointerType *PTy = cast<PointerType>(Val->getType());
+ PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
}
@@ -728,7 +728,7 @@ bool LazyValueInfoCache::solveBlockValueConstantRange(LVILatticeVal &BBLV,
ConstantRange LHSRange = LHSVal.getConstantRange();
ConstantRange RHSRange(1);
- const IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
+ IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
if (isa<BinaryOperator>(BBI)) {
if (ConstantInt *RHS = dyn_cast<ConstantInt>(BBI->getOperand(1))) {
RHSRange = ConstantRange(RHS->getValue());
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 89755da850..38d677d502 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -71,7 +71,7 @@ namespace {
void visitCallSite(CallSite CS);
void visitMemoryReference(Instruction &I, Value *Ptr,
uint64_t Size, unsigned Align,
- const Type *Ty, unsigned Flags);
+ Type *Ty, unsigned Flags);
void visitCallInst(CallInst &I);
void visitInvokeInst(InvokeInst &I);
@@ -201,7 +201,7 @@ void Lint::visitCallSite(CallSite CS) {
"Undefined behavior: Caller and callee calling convention differ",
&I);
- const FunctionType *FT = F->getFunctionType();
+ FunctionType *FT = F->getFunctionType();
unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
Assert1(FT->isVarArg() ?
@@ -240,7 +240,7 @@ void Lint::visitCallSite(CallSite CS) {
// Check that an sret argument points to valid memory.
if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
- const Type *Ty =
+ Type *Ty =
cast<PointerType>(Formal->getType())->getElementType();
visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
TD ? TD->getABITypeAlignment(Ty) : 0,
@@ -364,7 +364,7 @@ void Lint::visitReturnInst(ReturnInst &I) {
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I,
Value *Ptr, uint64_t Size, unsigned Align,
- const Type *Ty, unsigned Flags) {
+ Type *Ty, unsigned Flags) {
// If no memory is being referenced, it doesn't matter if the pointer
// is valid.
if (Size == 0)
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index c5c676b526..1f554a3da2 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -90,7 +90,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
if (TD)
Base = getUnderlyingObjectWithOffset(V, TD, ByteOffset);
- const Type *BaseType = 0;
+ Type *BaseType = 0;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
// An alloca is safe to load from as load as it is suitably aligned.
@@ -114,7 +114,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
return true; // Loading directly from an alloca or global is OK.
// Check if the load is within the bounds of the underlying object.
- const PointerType *AddrTy = cast<PointerType>(V->getType());
+ PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = TD->getTypeStoreSize(AddrTy->getElementType());
if (ByteOffset + LoadSize <= TD->getTypeAllocSize(BaseType) &&
(Align == 0 || (ByteOffset % Align) == 0))
@@ -169,7 +169,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// If we're using alias analysis to disambiguate get the size of *Ptr.
uint64_t AccessSize = 0;
if (AA) {
- const Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
+ Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();
AccessSize = AA->getTypeStoreSize(AccessTy);
}
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 53d4304911..8d451c46f9 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -47,7 +47,7 @@ static bool isMallocCall(const CallInst *CI) {
// Check malloc prototype.
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute will exist.
- const FunctionType *FTy = Callee->getFunctionType();
+ FunctionType *FTy = Callee->getFunctionType();
if (FTy->getNumParams() != 1)
return false;
return FTy->getParamType(0)->isIntegerTy(32) ||
@@ -94,12 +94,12 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
return NULL;
// The size of the malloc's result type must be known to determine array size.
- const Type *T = getMallocAllocatedType(CI);
+ Type *T = getMallocAllocatedType(CI);
if (!T || !T->isSized() || !TD)
return NULL;
unsigned ElementSize = TD->getTypeAllocSize(T);
- if (const StructType *ST = dyn_cast<StructType>(T))
+ if (StructType *ST = dyn_cast<StructType>(T))
ElementSize = TD->getStructLayout(ST)->getSizeInBytes();
// If malloc call's arg can be determined to be a multiple of ElementSize,
@@ -133,10 +133,10 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 0: PointerType is the calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-const PointerType *llvm::getMallocType(const CallInst *CI) {
+PointerType *llvm::getMallocType(const CallInst *CI) {
assert(isMalloc(CI) && "getMallocType and not malloc call");
- const PointerType *MallocType = NULL;
+ PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0;
// Determine if CallInst has a bitcast use.
@@ -164,8 +164,8 @@ const PointerType *llvm::getMallocType(const CallInst *CI) {
/// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL.
-const Type *llvm::getMallocAllocatedType(const CallInst *CI) {
- const PointerType *PT = getMallocType(CI);
+Type *llvm::getMallocAllocatedType(const CallInst *CI) {
+ PointerType *PT = getMallocType(CI);
return PT ? PT->getElementType() : NULL;
}
@@ -201,7 +201,7 @@ const CallInst *llvm::isFreeCall(const Value *I) {
// Check free prototype.
// FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
// attribute will exist.
- const FunctionType *FTy = Callee->getFunctionType();
+ FunctionType *FTy = Callee->getFunctionType();
if (!FTy->getReturnType()->isVoidTy())
return 0;
if (FTy->getNumParams() != 1)
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index bba4482f4d..34ba92509e 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -382,7 +382,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// location is 1 byte at P+1). If so, return it as a load/load
// clobber result, allowing the client to decide to widen the load if
// it wants to.
- if (const IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
+ if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
MemLocOffset, LI, TD))
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 025718e09f..05267d12d8 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -197,7 +197,7 @@ void SCEV::print(raw_ostream &OS) const {
}
case scUnknown: {
const SCEVUnknown *U = cast<SCEVUnknown>(this);
- const Type *AllocTy;
+ Type *AllocTy;
if (U->isSizeOf(AllocTy)) {
OS << "sizeof(" << *AllocTy << ")";
return;
@@ -207,7 +207,7 @@ void SCEV::print(raw_ostream &OS) const {
return;
}
- const Type *CTy;
+ Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo)) {
OS << "offsetof(" << *CTy << ", ";
@@ -228,7 +228,7 @@ void SCEV::print(raw_ostream &OS) const {
llvm_unreachable("Unknown SCEV kind!");
}
-const Type *SCEV::getType() const {
+Type *SCEV::getType() const {
switch (getSCEVType()) {
case scConstant:
return cast<SCEVConstant>(this)->getType();
@@ -297,17 +297,17 @@ const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
}
const SCEV *
-ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
- const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
+ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
+ IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
return getConstant(ConstantInt::get(ITy, V, isSigned));
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
- unsigned SCEVTy, const SCEV *op, const Type *ty)
+ unsigned SCEVTy, const SCEV *op, Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty)
+ const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@@ -315,7 +315,7 @@ SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
}
SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty)
+ const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@@ -323,7 +323,7 @@ SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
}
SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
- const SCEV *op, const Type *ty)
+ const SCEV *op, Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
@@ -354,7 +354,7 @@ void SCEVUnknown::allUsesReplacedWith(Value *New) {
setValPtr(New);
}
-bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
+bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
@@ -371,15 +371,15 @@ bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
return false;
}
-bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
+bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
if (CE->getOpcode() == Instruction::GetElementPtr &&
CE->getOperand(0)->isNullValue()) {
- const Type *Ty =
+ Type *Ty =
cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
- if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (StructType *STy = dyn_cast<StructType>(Ty))
if (!STy->isPacked() &&
CE->getNumOperands() == 3 &&
CE->getOperand(1)->isNullValue()) {
@@ -396,7 +396,7 @@ bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
return false;
}
-bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
+bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
@@ -404,7 +404,7 @@ bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
CE->getNumOperands() == 3 &&
CE->getOperand(0)->isNullValue() &&
CE->getOperand(1)->isNullValue()) {
- const Type *Ty =
+ Type *Ty =
cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
// Ignore vector types here so that ScalarEvolutionExpander doesn't
// emit getelementptrs that index into vectors.
@@ -652,7 +652,7 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
ScalarEvolution &SE,
- const Type* ResultTy) {
+ Type* ResultTy) {
// Handle the simplest case efficiently.
if (K == 1)
return SE.getTruncateOrZeroExtend(It, ResultTy);
@@ -742,7 +742,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
MultiplyFactor = MultiplyFactor.trunc(W);
// Calculate the product, at width T+W
- const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
+ IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
CalculationBits);
const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
for (unsigned i = 1; i != K; ++i) {
@@ -790,7 +790,7 @@ const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
//===----------------------------------------------------------------------===//
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
"This is not a truncating conversion!");
assert(isSCEVable(Ty) &&
@@ -877,7 +877,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
}
const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -954,7 +954,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
- const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
+ Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, ZMul);
@@ -1062,7 +1062,7 @@ static const SCEV *getOverflowLimitForStep(const SCEV *Step,
// result, the expression "Step + sext(PreIncAR)" is congruent with
// "sext(PostIncAR)"
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution *SE) {
const Loop *L = AR->getLoop();
const SCEV *Start = AR->getStart();
@@ -1086,7 +1086,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
// 2. Direct overflow check on the step operation's expression.
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
- const Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
+ Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
const SCEV *OperandExtendedStart =
SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
SE->getSignExtendExpr(Step, WideTy));
@@ -1112,7 +1112,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
// Get the normalized sign-extended expression for this AddRec's Start.
static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution *SE) {
const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
if (!PreStart)
@@ -1123,7 +1123,7 @@ static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
}
const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1208,7 +1208,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const SCEV *RecastedMaxBECount =
getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
if (MaxBECount == RecastedMaxBECount) {
- const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
+ Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, SMul);
@@ -1275,7 +1275,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
- const Type *Ty) {
+ Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
"This is not an extending conversion!");
assert(isSCEVable(Ty) &&
@@ -1438,7 +1438,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty add!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVAddExpr operand types don't match!");
@@ -1488,7 +1488,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// Okay, check to see if the same value occurs in the operand list more than
// once. If so, merge them together into an multiply expression. Since we
// sorted the list, these values are required to be adjacent.
- const Type *Ty = Ops[0]->getType();
+ Type *Ty = Ops[0]->getType();
bool FoundMatch = false;
for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
@@ -1515,8 +1515,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
// if the contents of the resulting outer trunc fold to something simple.
for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
- const Type *DstType = Trunc->getType();
- const Type *SrcType = Trunc->getOperand()->getType();
+ Type *DstType = Trunc->getType();
+ Type *SrcType = Trunc->getOperand()->getType();
SmallVector<const SCEV *, 8> LargeOps;
bool Ok = true;
// Check all the operands to see if they can be represented in the
@@ -1809,7 +1809,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
assert(!Ops.empty() && "Cannot get empty mul!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVMulExpr operand types don't match!");
@@ -2042,14 +2042,14 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
// Determine if the division can be folded into the operands of
// its operands.
// TODO: Generalize this to non-constants by using known-bits information.
- const Type *Ty = LHS->getType();
+ Type *Ty = LHS->getType();
unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
// For non-power-of-two values, effectively round the value up to the
// nearest power of two.
if (!RHSC->getValue()->getValue().isPowerOf2())
++MaxShiftAmt;
- const IntegerType *ExtTy =
+ IntegerType *ExtTy =
IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
// {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
@@ -2151,7 +2151,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
const Loop *L, SCEV::NoWrapFlags Flags) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
@@ -2269,7 +2269,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty smax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVSMaxExpr operand types don't match!");
@@ -2373,7 +2373,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
assert(!Ops.empty() && "Cannot get empty umax!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
- const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+ Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
"SCEVUMaxExpr operand types don't match!");
@@ -2476,7 +2476,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
-const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
// If we have TargetData, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
@@ -2488,20 +2488,20 @@ const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
+const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
Constant *C = ConstantExpr::getAlignOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
unsigned FieldNo) {
// If we have TargetData, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
@@ -2514,17 +2514,17 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
Constant *FieldNo) {
Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
C = Folded;
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
+ Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
@@ -2558,14 +2558,14 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
-bool ScalarEvolution::isSCEVable(const Type *Ty) const {
+bool ScalarEvolution::isSCEVable(Type *Ty) const {
// Integers and pointers are always SCEVable.
return Ty->isIntegerTy() || Ty->isPointerTy();
}
/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
-uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
+uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
// If we have a TargetData, use it!
@@ -2586,7 +2586,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
-const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
+Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
if (Ty->isIntegerTy())
@@ -2628,7 +2628,7 @@ const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
return getConstant(
cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
- const Type *Ty = V->getType();
+ Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
return getMulExpr(V,
getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
@@ -2640,7 +2640,7 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
return getConstant(
cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
- const Type *Ty = V->getType();
+ Type *Ty = V->getType();
Ty = getEffectiveSCEVType(Ty);
const SCEV *AllOnes =
getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
@@ -2664,8 +2664,8 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
/// input value to the specified type. If the type must be extended, it is zero
/// extended.
const SCEV *
-ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
@@ -2681,8 +2681,8 @@ ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
/// extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
- const Type *Ty) {
- const Type *SrcTy = V->getType();
+ Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
@@ -2697,8 +2697,8 @@ ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
/// input value to the specified type. If the type must be extended, it is zero
/// extended. The conversion must not be narrowing.
const SCEV *
-ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or zero extend with non-integer arguments!");
@@ -2713,8 +2713,8 @@ ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
/// input value to the specified type. If the type must be extended, it is sign
/// extended. The conversion must not be narrowing.
const SCEV *
-ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or sign extend with non-integer arguments!");
@@ -2730,8 +2730,8 @@ ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
-ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or any extend with non-integer arguments!");
@@ -2745,8 +2745,8 @@ ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. The conversion must not be widening.
const SCEV *
-ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
- const Type *SrcTy = V->getType();
+ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
+ Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or noop with non-integer arguments!");
@@ -3032,7 +3032,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
// context.
bool isInBounds = GEP->isInBounds();
- const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
+ Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
@@ -3044,7 +3044,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
I != E; ++I) {
Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
- if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
@@ -3244,7 +3244,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
// TODO: non-affine addrec
if (AddRec->isAffine()) {
- const Type *Ty = AddRec->getType();
+ Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
@@ -3396,7 +3396,7 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
// TODO: non-affine addrec
if (AddRec->isAffine()) {
- const Type *Ty = AddRec->getType();
+ Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
@@ -3601,9 +3601,9 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
LCI->getValue() == CI->getValue())
if (const SCEVZeroExtendExpr *Z =
dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
- const Type *UTy = U->getType();
+ Type *UTy = U->getType();
const SCEV *Z0 = Z->getOperand();
- const Type *Z0Ty = Z0->getType();
+ Type *Z0Ty = Z0->getType();
unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
// If C is a low-bits mask, the zero extend is serving to
@@ -4321,10 +4321,10 @@ GetAddressedElementFromGlobal(GlobalVariable *GV,
if (Idx >= CA->getNumOperands()) return 0; // Bogus program
Init = cast<Constant>(CA->getOperand(Idx));
} else if (isa<ConstantAggregateZero>(Init)) {
- if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
+ if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
assert(Idx < STy->getNumElements() && "Bad struct index!");
Init = Constant::getNullValue(STy->getElementType(Idx));
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
if (Idx >= ATy->getNumElements()) return 0; // Bogus program
Init = Constant::getNullValue(ATy->getElementType());
} else {
@@ -5741,7 +5741,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
assert(!isKnownNegative(Step) &&
"This code doesn't handle negative strides yet!");
- const Type *Ty = Start->getType();
+ Type *Ty = Start->getType();
// When Start == End, we have an exact BECount == 0. Short-circuit this case
// here because SCEV may not be able to determine that the unsigned division
@@ -5760,7 +5760,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
if (!NoWrap) {
// Check Add for unsigned overflow.
// TODO: More sophisticated things could be done here.
- const Type *WideTy = IntegerType::get(getContext(),
+ Type *WideTy = IntegerType::get(getContext(),
getTypeSizeInBits(Ty) + 1);
const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index befe6d2599..1904bdc586 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
-Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
+Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP) {
// Check to see if there is already a cast!
@@ -62,7 +62,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, const Type *Ty,
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
-Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {
+Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
assert((Op == Instruction::BitCast ||
Op == Instruction::PtrToInt ||
@@ -277,7 +277,7 @@ static bool FactorOutConstant(const SCEV *&S,
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution &SE) {
unsigned NumAddRecs = 0;
for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
@@ -306,7 +306,7 @@ static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
- const Type *Ty,
+ Type *Ty,
ScalarEvolution &SE) {
// Find the addrecs.
SmallVector<const SCEV *, 8> AddRecs;
@@ -365,10 +365,10 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
- const PointerType *PTy,
- const Type *Ty,
+ PointerType *PTy,
+ Type *Ty,
Value *V) {
- const Type *ElTy = PTy->getElementType();
+ Type *ElTy = PTy->getElementType();
SmallVector<Value *, 4> GepIndices;
SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
bool AnyNonZeroIndices = false;
@@ -423,7 +423,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
GepIndices.push_back(Scaled);
// Collect struct field index operands.
- while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+ while (StructType *STy = dyn_cast<StructType>(ElTy)) {
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
@@ -451,7 +451,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
- const Type *CTy;
+ Type *CTy;
Constant *FieldNo;
if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
GepIndices.push_back(FieldNo);
@@ -474,7 +474,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
}
}
- if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
+ if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
ElTy = ATy->getElementType();
else
break;
@@ -691,7 +691,7 @@ public:
}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the add operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal, and
@@ -717,7 +717,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// This is the first operand. Just expand it.
Sum = expand(Op);
++I;
- } else if (const PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
+ } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
SmallVector<const SCEV *, 4> NewOps;
@@ -731,7 +731,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
NewOps.push_back(X);
}
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
- } else if (const PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
+ } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
// The running sum is an integer, and there's a pointer at this level.
// Try to form a getelementptr. If the running sum is instructions,
// use a SCEVUnknown to avoid re-analyzing them.
@@ -762,7 +762,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the mul operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal.
@@ -804,7 +804,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *LHS = expandCodeFor(S->getLHS(), Ty);
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
@@ -847,8 +847,8 @@ static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
- const Type *ExpandTy,
- const Type *IntTy) {
+ Type *ExpandTy,
+ Type *IntTy) {
assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
// Reuse a previously-inserted PHI, if present.
@@ -969,7 +969,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
Value *IncV;
// If the PHI is a pointer, use a GEP, otherwise use an add or sub.
if (isPointer) {
- const PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
+ PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
// If the step isn't constant, don't use an implicitly scaled GEP, because
// that would require a multiply inside the loop.
if (!isa<ConstantInt>(StepV))
@@ -1001,8 +1001,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
- const Type *STy = S->getType();
- const Type *IntTy = SE.getEffectiveSCEVType(STy);
+ Type *STy = S->getType();
+ Type *IntTy = SE.getEffectiveSCEVType(STy);
const Loop *L = S->getLoop();
// Determine a normalized form of this expression, which is the expression
@@ -1045,7 +1045,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Expand the core addrec. If we need post-loop scaling, force it to
// expand to an integer type to avoid the need for additional casting.
- const Type *ExpandTy = PostLoopScale ? IntTy : STy;
+ Type *ExpandTy = PostLoopScale ? IntTy : STy;
PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);
// Accommodate post-inc mode, if necessary.
@@ -1069,7 +1069,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Re-apply any non-loop-dominating offset.
if (PostLoopOffset) {
- if (const PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
+ if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
const SCEV *const OffsetArray[1] = { PostLoopOffset };
Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
} else {
@@ -1086,7 +1086,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (!CanonicalMode) return expandAddRecExprLiterally(S);
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
const Loop *L = S->getLoop();
// First check for an existing canonical IV in a suitable type.
@@ -1132,7 +1132,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// Dig into the expression to find the pointer base for a GEP.
ExposePointerBase(Base, RestArray[0], SE);
// If we found a pointer, expand the AddRec with a GEP.
- if (const PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
+ if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
// Make sure the Base isn't something exotic, such as a multiplied
// or divided pointer value. In those cases, the result type isn't
// actually a pointer type.
@@ -1216,7 +1216,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateTrunc(V, Ty, "tmp");
@@ -1225,7 +1225,7 @@ Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
}
Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateZExt(V, Ty, "tmp");
@@ -1234,7 +1234,7 @@ Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
}
Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
- const Type *Ty = SE.getEffectiveSCEVType(S->getType());
+ Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateSExt(V, Ty, "tmp");
@@ -1244,7 +1244,7 @@ Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
- const Type *Ty = LHS->getType();
+ Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
@@ -1268,7 +1268,7 @@ Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
- const Type *Ty = LHS->getType();
+ Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
@@ -1290,7 +1290,7 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
return LHS;
}
-Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
+Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
Instruction *I) {
BasicBlock::iterator IP = I;
while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
@@ -1299,7 +1299,7 @@ Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty,
return expandCodeFor(SH, Ty);
}
-Value *SCEVExpander::expandCodeFor(const SCEV *SH, const Type *Ty) {
+Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
if (Ty) {
@@ -1384,7 +1384,7 @@ void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
- const Type *Ty) {
+ Type *Ty) {
assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
// Build a SCEV for {0,+,1}<L>.
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 455c91077d..3662582b67 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -34,7 +34,7 @@ const unsigned MaxDepth = 6;
/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth.
-static unsigned getBitWidth(const Type *Ty, const TargetData *TD) {
+static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@@ -103,7 +103,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
if (Align == 0 && TD && GV->getType()->getElementType()->isSized()) {
- const Type *ObjectType = GV->getType()->getElementType();
+ Type *ObjectType = GV->getType()->getElementType();
// If the object is defined in the current Module, we'll be giving
// it the preferred alignment. Otherwise, we have to assume that it
// may only have the minimum ABI alignment.
@@ -268,7 +268,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// FALL THROUGH and handle them the same as zext/trunc.
case Instruction::ZExt:
case Instruction::Trunc: {
- const Type *SrcTy = I->getOperand(0)->getType();
+ Type *SrcTy = I->getOperand(0)->getType();
unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
@@ -291,7 +291,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
return;
}
case Instruction::BitCast: {
- const Type *SrcTy = I->getOperand(0)->getType();
+ Type *SrcTy = I->getOperand(0)->getType();
if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
@@ -559,7 +559,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
gep_type_iterator GTI = gep_type_begin(I);
for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
Value *Index = I->getOperand(i);
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
// Handle struct member offset arithmetic.
if (!TD) return;
const StructLayout *SL = TD->getStructLayout(STy);
@@ -569,7 +569,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
CountTrailingZeros_64(Offset));
} else {
// Handle array index arithmetic.
- const Type *IndexedTy = GTI.getIndexedType();
+ Type *IndexedTy = GTI.getIndexedType();
if (!IndexedTy->isSized()) return;
unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
@@ -898,7 +898,7 @@ unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
assert((TD || V->getType()->isIntOrIntVectorTy()) &&
"ComputeNumSignBits requires a TargetData object to operate "
"on non-integer values!");
- const Type *Ty = V->getType();
+ Type *Ty = V->getType();
unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
Ty->getScalarSizeInBits();
unsigned Tmp, Tmp2;
@@ -1078,7 +1078,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
assert(Depth <= MaxDepth && "Limit Search Depth");
assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
- const Type *T = V->getType();
+ Type *T = V->getType();
ConstantInt *CI = dyn_cast<ConstantInt>(V);
@@ -1315,11 +1315,11 @@ Value *llvm::isBytewiseValue(Value *V) {
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
-static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
+static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
SmallVector<unsigned, 10> &Idxs,
unsigned IdxSkip,
Instruction *InsertBefore) {
- const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
+ llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
if (STy) {
// Save the original To argument so we can modify it
Value *OrigTo = To;
@@ -1378,7 +1378,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
Instruction *InsertBefore) {
assert(InsertBefore && "Must have someplace to insert!");
- const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
+ Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
idx_range);
Value *To = UndefValue::get(IndexedType);
SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
@@ -1404,7 +1404,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
&& "Not looking at a struct or array?");
assert(ExtractValueInst::getIndexedType(V->getType(), idx_range)
&& "Invalid indices for type?");
- const CompositeType *PTy = cast<CompositeType>(V->getType());
+ CompositeType *PTy = cast<CompositeType>(V->getType());
if (isa<UndefValue>(V))
return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
@@ -1506,7 +1506,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
if (OpC->isZero()) continue;
// Handle a struct and array indices which add their offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
} else {
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -1557,8 +1557,8 @@ bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
return false;
// Make sure the index-ee is a pointer to array of i8.
- const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
- const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
+ PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
+ ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
return false;