author    Duncan Sands <baldrick@free.fr>    2009-05-09 07:06:46 +0000
committer Duncan Sands <baldrick@free.fr>    2009-05-09 07:06:46 +0000
commit    ec4f97dd91023f63e05e94a333cdd2d4c57efedf
tree      0e48f8284a5e10d4f43e51986e586740ab6eb029 /lib/Transforms/Scalar
parent    bf5d4b2759f97a7b2a32c1ef22fbf8e3b46bc28e
Rename PaddedSize to AllocSize, in the hope that this will make it more
obvious what it represents, and stop it being confused with the StoreSize.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@71349 91177308-0d34-0410-b5e6-96231b3b80d8
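For context, the distinction the new name is meant to make obvious: a type's
store size is the number of bytes a load or store of that type actually
touches, while its alloc size also includes the tail padding required when
the type is laid out in an array or an alloca. A minimal sketch of the
difference, assuming the LLVM 2.5-era TargetData API (the showSizes helper
is hypothetical, for illustration only):

    #include "llvm/Target/TargetData.h"
    #include "llvm/DerivedTypes.h"
    #include <cassert>
    using namespace llvm;

    // Hypothetical helper: for a struct such as {i32, i8} with 4-byte
    // alignment, getTypeStoreSize returns 5 (bytes actually written),
    // while getTypeAllocSize returns 8 (the stride between successive
    // array elements, including tail padding).
    static void showSizes(const TargetData &TD, const StructType *STy) {
      uint64_t StoreSize = TD.getTypeStoreSize(STy);
      uint64_t AllocSize = TD.getTypeAllocSize(STy);
      // Sizing allocas, array strides, and memcpy lengths (as at the
      // call sites updated below) must use AllocSize, not StoreSize.
      assert(AllocSize >= StoreSize && "alloc size includes tail padding");
      (void)StoreSize;
      (void)AllocSize;
    }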
Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp  |  8
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp  | 30
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp       |  8
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp  | 40
4 files changed, 43 insertions(+), 43 deletions(-)
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index 2d38e76dbe..b923c92bd3 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -305,11 +305,11 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (AllocaInst* A = dyn_cast<AllocaInst>(*I)) {
if (ConstantInt* C = dyn_cast<ConstantInt>(A->getArraySize()))
pointerSize = C->getZExtValue() *
- TD.getTypePaddedSize(A->getAllocatedType());
+ TD.getTypeAllocSize(A->getAllocatedType());
} else {
const PointerType* PT = cast<PointerType>(
cast<Argument>(*I)->getType());
- pointerSize = TD.getTypePaddedSize(PT->getElementType());
+ pointerSize = TD.getTypeAllocSize(PT->getElementType());
}
// See if the call site touches it
@@ -382,10 +382,10 @@ bool DSE::RemoveUndeadPointers(Value* killPointer, uint64_t killPointerSize,
if (AllocaInst* A = dyn_cast<AllocaInst>(*I)) {
if (ConstantInt* C = dyn_cast<ConstantInt>(A->getArraySize()))
pointerSize = C->getZExtValue() *
- TD.getTypePaddedSize(A->getAllocatedType());
+ TD.getTypeAllocSize(A->getAllocatedType());
} else {
const PointerType* PT = cast<PointerType>(cast<Argument>(*I)->getType());
- pointerSize = TD.getTypePaddedSize(PT->getElementType());
+ pointerSize = TD.getTypeAllocSize(PT->getElementType());
}
// See if this pointer could alias it
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index b80fc4991a..c76403e039 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -5202,7 +5202,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
++i, ++GTI) {
Value *Op = *i;
- uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType()) & PtrSizeMask;
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
if (OpC->isZero()) continue;
@@ -5294,7 +5294,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
} else {
@@ -5310,7 +5310,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
Value *VariableIdx = GEP->getOperand(i);
// Determine the scale factor of the variable element. For example, this is
// 4 if the variable index is into an array of i32.
- uint64_t VariableScale = TD.getTypePaddedSize(GTI.getIndexedType());
+ uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
// Verify that there are no other variable indices. If so, emit the hard way.
for (++i, ++GTI; i != e; ++i, ++GTI) {
@@ -5324,7 +5324,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
}
@@ -7606,8 +7606,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
CastElTyAlign == AllocElTyAlign) return 0;
- uint64_t AllocElTySize = TD->getTypePaddedSize(AllocElTy);
- uint64_t CastElTySize = TD->getTypePaddedSize(CastElTy);
+ uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
+ uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
if (CastElTySize == 0 || AllocElTySize == 0) return 0;
// See if we can satisfy the modulus by pulling a scale out of the array
@@ -7905,7 +7905,7 @@ static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
// is something like [0 x {int, int}]
const Type *IntPtrTy = TD->getIntPtrType();
int64_t FirstIdx = 0;
- if (int64_t TySize = TD->getTypePaddedSize(Ty)) {
+ if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
Offset -= FirstIdx*TySize;
@@ -7937,7 +7937,7 @@ static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
} else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
- uint64_t EltSize = TD->getTypePaddedSize(AT->getElementType());
+ uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
assert(EltSize && "Cannot index into a zero-sized array");
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
Offset %= EltSize;
@@ -8687,7 +8687,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
// is a single-index GEP.
if (X->getType() == CI.getType()) {
// Get the size of the pointee type.
- uint64_t Size = TD->getTypePaddedSize(DestPointee);
+ uint64_t Size = TD->getTypeAllocSize(DestPointee);
// Convert the constant to intptr type.
APInt Offset = Cst->getValue();
@@ -8707,7 +8707,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
// "inttoptr+GEP" instead of "add+intptr".
// Get the size of the pointee type.
- uint64_t Size = TD->getTypePaddedSize(DestPointee);
+ uint64_t Size = TD->getTypeAllocSize(DestPointee);
// Convert the constant to intptr type.
APInt Offset = Cst->getValue();
@@ -9811,7 +9811,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
return false;
- if (TD->getTypePaddedSize(SrcTy) != TD->getTypePaddedSize(DstTy))
+ if (TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
return false;
return true;
}
@@ -10966,8 +10966,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
if (isa<ArrayType>(SrcElTy) &&
- TD->getTypePaddedSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
- TD->getTypePaddedSize(ResElTy)) {
+ TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
+ TD->getTypeAllocSize(ResElTy)) {
Value *Idx[2];
Idx[0] = Constant::getNullValue(Type::Int32Ty);
Idx[1] = GEP.getOperand(1);
@@ -10984,7 +10984,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (isa<ArrayType>(SrcElTy) && ResElTy == Type::Int8Ty) {
uint64_t ArrayEltSize =
- TD->getTypePaddedSize(cast<ArrayType>(SrcElTy)->getElementType());
+ TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
// Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
// allow either a mul, shift, or constant here.
@@ -11137,7 +11137,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
// If alloca'ing a zero byte object, replace the alloca with a null pointer.
// Note that we only do this for alloca's, because malloc should allocate
// and return a unique pointer, even for a zero byte allocation.
- if (TD->getTypePaddedSize(AI.getAllocatedType()) == 0)
+ if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
// If the alignment is 0 (unspecified), assign it the preferred alignment.
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index b6c4c45274..5cf05183ec 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -104,7 +104,7 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
// Otherwise, we have a sequential type like an array or vector. Multiply
// the index by the ElementSize.
- uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType());
+ uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*OpC->getSExtValue();
}
@@ -511,7 +511,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
if (!srcArraySize)
return false;
- uint64_t srcSize = TD.getTypePaddedSize(srcAlloca->getAllocatedType()) *
+ uint64_t srcSize = TD.getTypeAllocSize(srcAlloca->getAllocatedType()) *
srcArraySize->getZExtValue();
if (cpyLength->getZExtValue() < srcSize)
@@ -526,7 +526,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
if (!destArraySize)
return false;
- uint64_t destSize = TD.getTypePaddedSize(A->getAllocatedType()) *
+ uint64_t destSize = TD.getTypeAllocSize(A->getAllocatedType()) *
destArraySize->getZExtValue();
if (destSize < srcSize)
@@ -538,7 +538,7 @@ bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
return false;
const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
- uint64_t destSize = TD.getTypePaddedSize(StructTy);
+ uint64_t destSize = TD.getTypeAllocSize(StructTy);
if (destSize < srcSize)
return false;
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index db6500c930..5e2859abae 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -252,7 +252,7 @@ bool SROA::performScalarRepl(Function &F) {
// transform the allocation instruction if it is an array allocation
// (allocations OF arrays are ok though), and an allocation of a scalar
// value cannot be decomposed at all.
- uint64_t AllocaSize = TD->getTypePaddedSize(AI->getAllocatedType());
+ uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
// Do not promote any struct whose size is too big.
if (AllocaSize > SRThreshold) continue;
@@ -601,7 +601,7 @@ void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
// If not the whole aggregate, give up.
if (Length->getZExtValue() !=
- TD->getTypePaddedSize(AI->getType()->getElementType()))
+ TD->getTypeAllocSize(AI->getType()->getElementType()))
return MarkUnsafe(Info);
// We only know about memcpy/memset/memmove.
@@ -637,8 +637,8 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
// cast a {i32,i32}* to i64* and store through it. This is similar to the
// memcpy case and occurs in various "byval" cases and emulated memcpys.
if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
- TD->getTypePaddedSize(SI->getOperand(0)->getType()) ==
- TD->getTypePaddedSize(AI->getType()->getElementType())) {
+ TD->getTypeAllocSize(SI->getOperand(0)->getType()) ==
+ TD->getTypeAllocSize(AI->getType()->getElementType())) {
Info.isMemCpyDst = true;
continue;
}
@@ -652,8 +652,8 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
// cast a {i32,i32}* to i64* and load through it. This is similar to the
// memcpy case and occurs in various "byval" cases and emulated memcpys.
if (isa<IntegerType>(LI->getType()) &&
- TD->getTypePaddedSize(LI->getType()) ==
- TD->getTypePaddedSize(AI->getType()->getElementType())) {
+ TD->getTypeAllocSize(LI->getType()) ==
+ TD->getTypeAllocSize(AI->getType()->getElementType())) {
Info.isMemCpySrc = true;
continue;
}
@@ -782,7 +782,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
} else {
const Type *EltTy =
cast<SequentialType>(OtherPtr->getType())->getElementType();
- EltOffset = TD->getTypePaddedSize(EltTy)*i;
+ EltOffset = TD->getTypeAllocSize(EltTy)*i;
}
// The alignment of the other pointer is the guaranteed alignment of the
@@ -865,7 +865,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(),
MI);
- unsigned EltSize = TD->getTypePaddedSize(EltTy);
+ unsigned EltSize = TD->getTypeAllocSize(EltTy);
// Finally, insert the meminst for this element.
if (isa<MemTransferInst>(MI)) {
@@ -899,7 +899,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
// and store the element value to the individual alloca.
Value *SrcVal = SI->getOperand(0);
const Type *AllocaEltTy = AI->getType()->getElementType();
- uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);
+ uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
// If this isn't a store of an integer to the whole alloca, it may be a store
// to the first element. Just ignore the store in this case and normal SROA
@@ -922,7 +922,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
uint64_t Shift = Layout->getElementOffsetInBits(i);
if (TD->isBigEndian())
- Shift = AllocaSizeBits-Shift-TD->getTypePaddedSizeInBits(FieldTy);
+ Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
Value *EltVal = SrcVal;
if (Shift) {
@@ -957,7 +957,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
} else {
const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
const Type *ArrayEltTy = ATy->getElementType();
- uint64_t ElementOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
+ uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
uint64_t Shift;
@@ -1012,7 +1012,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
const Type *AllocaEltTy = AI->getType()->getElementType();
- uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);
+ uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
// If this isn't a load of the whole alloca to an integer, it may be a load
// of the first element. Just ignore the load in this case and normal SROA
@@ -1032,7 +1032,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
Layout = TD->getStructLayout(EltSTy);
} else {
const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
- ArrayEltBitOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
+ ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
}
Value *ResultVal = Constant::getNullValue(LI->getType());
@@ -1126,7 +1126,7 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
} else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
return HasPadding(VTy->getElementType(), TD);
}
- return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
+ return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}
/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of
@@ -1527,7 +1527,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
// Otherwise it must be an element access.
unsigned Elt = 0;
if (Offset) {
- unsigned EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
+ unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
Elt = Offset/EltSize;
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
}
@@ -1555,7 +1555,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
}
if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
- uint64_t EltSize = TD->getTypePaddedSizeInBits(AT->getElementType());
+ uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
Value *Res = UndefValue::get(AT);
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
@@ -1630,15 +1630,15 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
const Type *AllocaType = Old->getType();
if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
- uint64_t VecSize = TD->getTypePaddedSizeInBits(VTy);
- uint64_t ValSize = TD->getTypePaddedSizeInBits(SV->getType());
+ uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
+ uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());
// Changing the whole vector with memset or with an access of a different
// vector type?
if (ValSize == VecSize)
return Builder.CreateBitCast(SV, AllocaType, "tmp");
- uint64_t EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
+ uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
// Must be an element insertion.
unsigned Elt = Offset/EltSize;
@@ -1665,7 +1665,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
}
if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
- uint64_t EltSize = TD->getTypePaddedSizeInBits(AT->getElementType());
+ uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);