Diffstat (limited to 'lib/Transforms/Scalar/JumpThreading.cpp')
-rw-r--r-- | lib/Transforms/Scalar/JumpThreading.cpp | 42
1 file changed, 22 insertions, 20 deletions
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index b3ec2fc84c..067deb7d78 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -27,10 +27,10 @@
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/ValueHandle.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetLibraryInfo.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
@@ -76,7 +76,7 @@ namespace {
   /// revectored to the false side of the second if.
   ///
   class JumpThreading : public FunctionPass {
-    DataLayout *TD;
+    const DataLayout *DL;
     TargetLibraryInfo *TLI;
     LazyValueInfo *LVI;
 #ifdef NDEBUG
@@ -105,9 +105,9 @@ namespace {
       initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
     }
 
-    bool runOnFunction(Function &F);
+    bool runOnFunction(Function &F) override;
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
       AU.addRequired<LazyValueInfo>();
       AU.addPreserved<LazyValueInfo>();
       AU.addRequired<TargetLibraryInfo>();
@@ -148,8 +148,12 @@ FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); }
 /// runOnFunction - Top level algorithm.
 ///
 bool JumpThreading::runOnFunction(Function &F) {
+  if (skipOptnoneFunction(F))
+    return false;
+
   DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  DL = DLP ? &DLP->getDataLayout() : 0;
   TLI = &getAnalysis<TargetLibraryInfo>();
   LVI = &getAnalysis<LazyValueInfo>();
 
@@ -251,7 +255,7 @@ static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB,
     // as having cost of 2 total, and if they are a vector intrinsic, we model
     // them as having cost 1.
     if (const CallInst *CI = dyn_cast<CallInst>(I)) {
-      if (CI->hasFnAttr(Attribute::NoDuplicate))
+      if (CI->cannotDuplicate())
        // Blocks with NoDuplicate are modelled as having infinite cost, so they
        // are never duplicated.
        return ~0U;
@@ -490,7 +494,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
         Value *LHS = PN->getIncomingValue(i);
         Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);
 
-        Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, TD);
+        Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
         if (Res == 0) {
           if (!isa<Constant>(RHS))
             continue;
@@ -692,7 +696,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
   // Run constant folding to see if we can reduce the condition to a simple
   // constant.
   if (Instruction *I = dyn_cast<Instruction>(Condition)) {
-    Value *SimpleVal = ConstantFoldInstruction(I, TD, TLI);
+    Value *SimpleVal = ConstantFoldInstruction(I, DL, TLI);
     if (SimpleVal) {
       I->replaceAllUsesWith(SimpleVal);
       I->eraseFromParent();
@@ -1431,16 +1435,15 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
   for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
     // Scan all uses of this instruction to see if it is used outside of its
     // block, and if so, record them in UsesToRename.
-    for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
-         ++UI) {
-      Instruction *User = cast<Instruction>(*UI);
+    for (Use &U : I->uses()) {
+      Instruction *User = cast<Instruction>(U.getUser());
       if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
-        if (UserPN->getIncomingBlock(UI) == BB)
+        if (UserPN->getIncomingBlock(U) == BB)
           continue;
       } else if (User->getParent() == BB)
         continue;
 
-      UsesToRename.push_back(&UI.getUse());
+      UsesToRename.push_back(&U);
     }
 
     // If there are no uses outside the block, we're done with this instruction.
@@ -1475,7 +1478,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
   // At this point, the IR is fully up to date and consistent. Do a quick scan
   // over the new instructions and zap any that are constants or dead. This
   // frequently happens because of phi translation.
-  SimplifyInstructionsInBlock(NewBB, TD, TLI);
+  SimplifyInstructionsInBlock(NewBB, DL, TLI);
 
   // Threaded an edge!
   ++NumThreads;
@@ -1557,7 +1560,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
     // If this instruction can be simplified after the operands are updated,
     // just use the simplified value instead. This frequently happens due to
     // phi translation.
-    if (Value *IV = SimplifyInstruction(New, TD)) {
+    if (Value *IV = SimplifyInstruction(New, DL)) {
       delete New;
       ValueMapping[BI] = IV;
     } else {
@@ -1585,16 +1588,15 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
   for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
     // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
-    for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
-         ++UI) {
-      Instruction *User = cast<Instruction>(*UI);
+    for (Use &U : I->uses()) {
+      Instruction *User = cast<Instruction>(U.getUser());
       if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
-        if (UserPN->getIncomingBlock(UI) == BB)
+        if (UserPN->getIncomingBlock(U) == BB)
          continue;
       } else if (User->getParent() == BB)
         continue;
 
-      UsesToRename.push_back(&UI.getUse());
+      UsesToRename.push_back(&U);
     }
 
     // If there are no uses outside the block, we're done with this instruction.
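A note on the use-loop rewrite above: the two renaming loops in ThreadEdge and DuplicateCondBranchOnPHIIntoPred switch from explicit Value::use_iterator traversal to range-based iteration over Use objects. The standalone sketch below restates that new-style loop so the idiom is easier to read outside the diff; collectUsesOutsideBlock is a hypothetical helper name chosen for illustration and is not part of the patch.

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Use.h"

using namespace llvm;

// Hypothetical helper (illustration only, not in the patch): collect every
// use of I that lives outside BB -- the same filtering the patched loops
// perform before pushing uses into UsesToRename.
static void collectUsesOutsideBlock(Instruction *I, BasicBlock *BB,
                                    SmallVectorImpl<Use *> &UsesToRename) {
  // Range-based form of the old use_begin()/use_end() walk; each Use knows
  // its user instruction (getUser) and, for PHI users, the incoming edge.
  for (Use &U : I->uses()) {
    Instruction *User = cast<Instruction>(U.getUser());
    if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
      // A PHI use is "located" in the predecessor block of its incoming edge.
      if (UserPN->getIncomingBlock(U) == BB)
        continue;
    } else if (User->getParent() == BB)
      continue;
    UsesToRename.push_back(&U);
  }
}

Working with the Use itself, rather than dereferencing an iterator, is what lets the PHI case ask getIncomingBlock(U) for the incoming edge directly and lets &U be stored for the subsequent renaming step.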